// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>

#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_ivgen.h"
#include "cc_pm.h"

#define CC_MAX_POLL_ITER 10
/* The highest descriptor count in use */
#define CC_MAX_DESC_SEQ_LEN 23

struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* This lock protects access to HW registers that must be
	 * used by a single request at a time.
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;

	/* backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protect backlog queue */

#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
	bool is_runtime_suspended;
};

struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};

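/*
 * The SW request queue above is a power-of-two circular buffer:
 * MAX_REQUEST_QUEUE_SIZE must be a power of two so head/tail arithmetic
 * can wrap with a simple mask.  Occupancy is computed below as
 * (head - tail) & (MAX_REQUEST_QUEUE_SIZE - 1), and the "full" test is
 * ((head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) == tail, which deliberately
 * sacrifices one slot to distinguish full from empty.  A cc_bl_item holds
 * a private copy of a request and its descriptor sequence so the caller's
 * buffers need not outlive a backlogged request.
 */
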
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	kzfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}

int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("ccree");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}

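/*
 * The "dummy" completion descriptor built above is a single-word BYPASS
 * DMA into dummy_comp_buff with QUEUE_LAST_IND set.  When it is appended
 * to a caller's sequence (see cc_do_send_request() with add_comp == true),
 * the hardware counts one extra AXI completion for it, which is what the
 * synchronous path in cc_send_sync_request() ends up waiting on.
 */
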
static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/*
	 * We do indeed write all six command words to the same
	 * register. The HW supports this.
	 */
	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
			i, seq[i].word[0], seq[i].word[1],
			seq[i].word[2], seq[i].word[3],
			seq[i].word[4], seq[i].word[5]);
	}
}

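/*
 * Note: enqueue_seq() uses writel_relaxed() on purpose; ordering against
 * prior CPU writes to request memory is provided by the explicit wmb()
 * that callers issue before pushing a sequence (see cc_do_send_request()
 * and send_request_init()).
 */
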
/*
 * Completion will take place if and only if user requested completion
 * by cc_send_sync_request().
 *
 * \param dev
 * \param dx_compl_h The completion event to signal
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}

static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* The SW queue is checked only once, as it cannot change during
	 * the poll: the spinlock_bh is held by this thread.
	 */
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in HW queue. Poll a constant number of iterations. */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* There is enough room; return */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue; try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}

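/*
 * cc_queues_status() returns 0 when both the SW queue and the HW
 * descriptor queue have room for total_seq_len descriptors, and -ENOSPC
 * otherwise.  Callers translate -ENOSPC into either the crypto API
 * backlog path (cc_send_request()) or a wait on hw_queue_avail
 * (cc_send_sync_request()).
 */
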
/*
 * Enqueue caller request to crypto hardware.
 * Must be called with the HW lock held and PM running.
 *
 * \param drvdata
 * \param cc_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param add_comp If "true": add an artificial dout DMA to mark completion
 * \param ivgen If "true": acquire an IV from the IV pool
 *
 * \return int Returns -EINPROGRESS or error code
 */
static int cc_do_send_request(struct cc_drvdata *drvdata,
			      struct cc_crypto_req *cc_req,
			      struct cc_hw_desc *desc, unsigned int len,
			      bool add_comp, bool ivgen)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int iv_seq_len = 0;
	unsigned int total_seq_len = len; /* initial sequence length */
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	if (ivgen) {
		dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
			cc_req->ivgen_dma_addr_len,
			&cc_req->ivgen_dma_addr[0],
			&cc_req->ivgen_dma_addr[1],
			&cc_req->ivgen_dma_addr[2],
			cc_req->ivgen_size);

		/* Acquire IV from pool */
		rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
			       cc_req->ivgen_dma_addr_len,
			       cc_req->ivgen_size, iv_seq, &iv_seq_len);
		if (rc) {
			dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
			return rc;
		}

		total_seq_len += iv_seq_len;
	}

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock */
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();

	/* STAT_PHASE_4: Push sequence */
	if (ivgen)
		enqueue_seq(drvdata, iv_seq, iv_seq_len);

	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. It may indicate a
		 * problem with resuming power. Set the free slot count to 0
		 * and hope for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}

	/* Operation still in progress */
	return -EINPROGRESS;
}

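/*
 * On the success path cc_do_send_request() always returns -EINPROGRESS:
 * the request has been copied into the SW queue and its descriptors pushed
 * to the HW queue, and the caller's user_cb will be invoked from
 * proc_completions() once the matching AXI completion arrives.  The
 * q_free_slots value is only a cached copy of DSCRPTR_QUEUE_CONTENT; it is
 * refreshed by cc_queues_status() before each enqueue.
 */
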
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	spin_unlock_bh(&mgr->bl_lock);
	tasklet_schedule(&mgr->comptask);
}

static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	struct crypto_async_request *req;
	bool ivgen;
	unsigned int total_len;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = (struct crypto_async_request *)creq->user_arg;

		/*
		 * Notify the request we're moving out of the backlog
		 * but only if we haven't done so already.
		 */
		if (!bli->notif) {
			req->complete(req, -EINPROGRESS);
			bli->notif = true;
		}

		ivgen = !!creq->ivgen_dma_addr_len;
		total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, total_len);
		if (rc) {
			/*
			 * There is still not room in the FIFO for
			 * this request. Bail out. We'll return here
			 * on the next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
					bli->len, false, ivgen);

		spin_unlock(&mgr->hw_lock);

		if (rc != -EINPROGRESS) {
			cc_pm_put_suspend(dev);
			creq->user_cb(dev, req, rc);
		}

		/* Remove ourselves from the backlog list */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
		kfree(bli);
	}

	spin_unlock(&mgr->bl_lock);
}

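/*
 * cc_proc_backlog() is driven from comp_handler(), i.e. the backlog is
 * retried on every completion interrupt.  Entries are submitted in FIFO
 * order and processing stops at the first entry that still does not fit
 * (cc_queues_status() != 0), which preserves request ordering.
 */
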
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	bool ivgen = !!cc_req->ivgen_dma_addr_len;
	unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, total_len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif /* CC_DEBUG_FORCE_BACKLOG */

	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc)
		rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
					ivgen);

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}

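/*
 * Illustrative caller sketch (not part of the driver; my_complete and
 * areq are hypothetical names for a caller's callback and async request):
 *
 *	cc_req.user_cb  = my_complete;
 *	cc_req.user_arg = areq;
 *	rc = cc_send_request(drvdata, &cc_req, desc, len, areq);
 *	// -EINPROGRESS: queued to HW, completion arrives via my_complete()
 *	// -EBUSY:       backlogged (only with CRYPTO_TFM_REQ_MAY_BACKLOG)
 *	// other:        error, nothing was queued
 */
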
int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		if (rc != -ENOSPC) {
			cc_pm_put_suspend(dev);
			return rc;
		}
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
	spin_unlock_bh(&mgr->hw_lock);

	if (rc != -EINPROGRESS) {
		cc_pm_put_suspend(dev);
		return rc;
	}

	wait_for_completion(&cc_req->seq_compl);
	return 0;
}

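/*
 * cc_send_sync_request() reuses the asynchronous machinery for a
 * synchronous caller: user_cb/user_arg are pointed at the completion
 * embedded in the request (seq_compl), the dummy completion descriptor is
 * appended (add_comp == true, which is why cc_queues_status() is asked for
 * len + 1 slots), and the function sleeps until proc_completions() signals
 * seq_compl via request_mgr_complete().
 */
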
/*
 * Enqueue caller request to crypto hardware during the init process.
 * Assumes this function is not called in the middle of a flow,
 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /* initial sequence length */
	int rc;

	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT.
	 */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(drvdata, &desc[(len - 1)]);

	/*
	 * We are about to push a command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes.
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}

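/*
 * send_request_init() is a stripped-down submit path used while the driver
 * is still initializing: it bypasses the SW request queue and the
 * completion callbacks, marks its own last descriptor with QUEUE_LAST_IND,
 * and simply re-reads DSCRPTR_QUEUE_CONTENT afterwards to resynchronize
 * q_free_slots.
 */
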
void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}

#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif

static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * queue is empty. This is not normal. Return and
			 * hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, 0);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}

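/*
 * proc_completions() retires one SW-queue entry per counted AXI
 * completion: the tail slot's user_cb is called with status 0, the tail
 * index wraps with the MAX_REQUEST_QUEUE_SIZE - 1 mask, and the runtime PM
 * reference taken at submit time is dropped via cc_pm_put_suspend().
 */
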
static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, drvdata->axim_mon_offset));
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	u32 irq;

	irq = (drvdata->irq & CC_COMP_IRQ_MASK);

	if (irq & CC_COMP_IRQ_MASK) {
		/* To avoid the interrupt from firing as we unmask it,
		 * we clear it now
		 */
		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);

		/* Avoid race with above clear: Test completion counter
		 * once more
		 */
		request_mgr_handle->axi_completed +=
				cc_axi_comp_count(drvdata);

		while (request_mgr_handle->axi_completed) {
			do {
				proc_completions(drvdata);
				/* At this point (after proc_completions()),
				 * request_mgr_handle->axi_completed is 0.
				 */
				request_mgr_handle->axi_completed =
						cc_axi_comp_count(drvdata);
			} while (request_mgr_handle->axi_completed > 0);

			cc_iowrite(drvdata, CC_REG(HOST_ICR),
				   CC_COMP_IRQ_MASK);

			request_mgr_handle->axi_completed +=
					cc_axi_comp_count(drvdata);
		}
	}
	/* After verifying that there is nothing to do,
	 * unmask the AXI completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);

	cc_proc_backlog(drvdata);
}

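/*
 * The nested loop above closes the race between draining completions and
 * re-enabling the interrupt: the interrupt cause is cleared first, the AXI
 * completion counter is re-read after every drain, and only once it stays
 * at zero is HOST_IMR unmasked.  Any completion that slips in between the
 * ICR write and the unmask is therefore still observed.
 */
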
/*
 * Resume the queue configuration - no need to take the lock as this happens
 * inside the spin lock protection
 */
#if defined(CONFIG_PM)
int cc_resume_req_queue(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	spin_lock_bh(&request_mgr_handle->hw_lock);
	request_mgr_handle->is_runtime_suspended = false;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

/*
 * Suspend the queue configuration. Since it is used for runtime suspend
 * only, just verify that the queue can be suspended.
 */
int cc_suspend_req_queue(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	/* lock the send_request */
	spin_lock_bh(&request_mgr_handle->hw_lock);
	if (request_mgr_handle->req_queue_head !=
	    request_mgr_handle->req_queue_tail) {
		spin_unlock_bh(&request_mgr_handle->hw_lock);
		return -EBUSY;
	}
	request_mgr_handle->is_runtime_suspended = true;
	spin_unlock_bh(&request_mgr_handle->hw_lock);

	return 0;
}

bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	return request_mgr_handle->is_runtime_suspended;
}

#endif