// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_ivgen.h"
#include "cc_pm.h"

#define CC_MAX_POLL_ITER 10
/* The highest descriptor count in use */
#define CC_MAX_DESC_SEQ_LEN 23
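/*
 * CC_MAX_POLL_ITER bounds how many times cc_queues_status() re-reads the
 * HW descriptor queue fill level before giving up, and CC_MAX_DESC_SEQ_LEN
 * sizes the descriptor copy kept per backlogged request in struct
 * cc_bl_item below.
 */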
struct cc_req_mgr_handle {
	/* Request manager resources */
	unsigned int hw_queue_size; /* HW capability */
	unsigned int min_free_hw_slots;
	unsigned int max_used_sw_slots;
	struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
	u32 req_queue_head;
	u32 req_queue_tail;
	u32 axi_completed;
	u32 q_free_slots;
	/* This lock protects access to HW register
	 * that must be single request at a time
	 */
	spinlock_t hw_lock;
	struct cc_hw_desc compl_desc;
	u8 *dummy_comp_buff;
	dma_addr_t dummy_comp_buff_dma;

	/* backlog queue */
	struct list_head backlog;
	unsigned int bl_len;
	spinlock_t bl_lock; /* protect backlog queue */

#ifdef COMP_IN_WQ
	struct workqueue_struct *workq;
	struct delayed_work compwork;
#else
	struct tasklet_struct comptask;
#endif
};

struct cc_bl_item {
	struct cc_crypto_req creq;
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len;
	struct list_head list;
	bool notif;
};
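/*
 * The SW request queue is a power-of-two ring buffer; indices are only
 * ever masked, never wrapped explicitly, so MAX_REQUEST_QUEUE_SIZE must be
 * a power of two. For illustration:
 *
 *	used  = (head - tail) & (MAX_REQUEST_QUEUE_SIZE - 1);
 *	empty = (head == tail);
 *	full  = ((head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) == tail;
 *
 * i.e. one slot is always left unused to distinguish "full" from "empty".
 */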
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!req_mgr_h)
		return; /* Not allocated */

	if (req_mgr_h->dummy_comp_buff_dma) {
		dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
				  req_mgr_h->dummy_comp_buff_dma);
	}

	dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
						req_mgr_h->min_free_hw_slots));
	dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);

#ifdef COMP_IN_WQ
	flush_workqueue(req_mgr_h->workq);
	destroy_workqueue(req_mgr_h->workq);
#else
	/* Kill tasklet */
	tasklet_kill(&req_mgr_h->comptask);
#endif
	kzfree(req_mgr_h);
	drvdata->request_mgr_handle = NULL;
}
int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *req_mgr_h;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;

	req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
	if (!req_mgr_h) {
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	drvdata->request_mgr_handle = req_mgr_h;

	spin_lock_init(&req_mgr_h->hw_lock);
	spin_lock_init(&req_mgr_h->bl_lock);
	INIT_LIST_HEAD(&req_mgr_h->backlog);

#ifdef COMP_IN_WQ
	dev_dbg(dev, "Initializing completion workqueue\n");
	req_mgr_h->workq = create_singlethread_workqueue("ccree");
	if (!req_mgr_h->workq) {
		dev_err(dev, "Failed creating work queue\n");
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
#else
	dev_dbg(dev, "Initializing completion tasklet\n");
	tasklet_init(&req_mgr_h->comptask, comp_handler,
		     (unsigned long)drvdata);
#endif
	req_mgr_h->hw_queue_size = cc_ioread(drvdata,
					     CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
	dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
	if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
		dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
			req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}
	req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
	req_mgr_h->max_used_sw_slots = 0;

	/* Allocate DMA word for "dummy" completion descriptor use */
	req_mgr_h->dummy_comp_buff =
		dma_alloc_coherent(dev, sizeof(u32),
				   &req_mgr_h->dummy_comp_buff_dma,
				   GFP_KERNEL);
	if (!req_mgr_h->dummy_comp_buff) {
		dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
			sizeof(u32));
		rc = -ENOMEM;
		goto req_mgr_init_err;
	}

	/* Init. "dummy" completion descriptor */
	hw_desc_init(&req_mgr_h->compl_desc);
	set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
	set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
		      sizeof(u32), NS_BIT, 1);
	set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
	set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);

	return 0;

req_mgr_init_err:
	cc_req_mgr_fini(drvdata);
	return rc;
}
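/*
 * Note on the "dummy" completion descriptor built above: it is a BYPASS
 * descriptor that DMAs a constant zero word into the DMA-coherent
 * dummy_comp_buff and is flagged as the queue-last descriptor.
 * cc_do_send_request() appends it when add_comp is true (the synchronous
 * path), which seems intended to guarantee at least one countable AXI
 * write completion per such sequence for the completion handler to pick up.
 */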
static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{
	int i, w;
	void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
	struct device *dev = drvdata_to_dev(drvdata);

	/*
	 * We do indeed write all 6 command words to the same
	 * register. The HW supports this.
	 */

	for (i = 0; i < seq_len; i++) {
		for (w = 0; w <= 5; w++)
			writel_relaxed(seq[i].word[w], reg);

		dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
			i, seq[i].word[0], seq[i].word[1],
			seq[i].word[2], seq[i].word[3],
			seq[i].word[4], seq[i].word[5]);
	}
}
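/*
 * enqueue_seq() pushes raw descriptors into the HW FIFO through the single
 * DSCRPTR_QUEUE_WORD0 register window; it performs no locking and no space
 * accounting of its own, so callers must hold hw_lock, must have verified
 * room via cc_queues_status() and must have issued the wmb() that orders
 * prior memory writes before these register writes.
 */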
/*
 * Completion will take place if and only if user requested completion
 * by cc_send_sync_request().
 *
 * \param dev
 * \param dx_compl_h The completion event to signal
 */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{
	struct completion *this_compl = dx_compl_h;

	complete(this_compl);
}
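/*
 * request_mgr_complete() is the user_cb installed by cc_send_sync_request():
 * user_arg points at cc_req->seq_compl, so proc_completions() ends up
 * waking the synchronous caller blocked in wait_for_completion().
 */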
static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{
	unsigned long poll_queue;
	struct device *dev = drvdata_to_dev(drvdata);

	/* SW queue is checked only once as it will not
	 * be changed during the poll because the spinlock_bh
	 * is held by the thread
	 */
	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
	    req_mgr_h->req_queue_tail) {
		dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
			req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
		return -ENOSPC;
	}

	if (req_mgr_h->q_free_slots >= total_seq_len)
		return 0;

	/* Wait for space in HW queue. Poll constant num of iterations. */
	for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER; poll_queue++) {
		req_mgr_h->q_free_slots =
			cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
		if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
			req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;

		if (req_mgr_h->q_free_slots >= total_seq_len) {
			/* If there is enough place return */
			return 0;
		}

		dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
			req_mgr_h->q_free_slots, total_seq_len);
	}
	/* No room in the HW queue try again later */
	dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
		req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
		req_mgr_h->q_free_slots, total_seq_len);
	return -ENOSPC;
}
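/*
 * cc_queues_status() implements two-level admission control: the SW ring
 * is checked once (its head cannot move while the caller holds the lock),
 * while the HW queue level (DSCRPTR_QUEUE_CONTENT) is re-read up to
 * CC_MAX_POLL_ITER times in the hope that in-flight descriptors drain in
 * the meantime. Either shortage is reported uniformly as -ENOSPC.
 */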
/*
 * Enqueue caller request to crypto hardware.
 * Need to be called with HW lock held and PM running
 *
 * \param drvdata
 * \param cc_req The request to enqueue
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 * \param add_comp If "true": add an artificial dout DMA to mark completion
 * \param ivgen If "true": acquire an IV from the IV pool
 *
 * \return int Returns -EINPROGRESS or error code
 */
static int cc_do_send_request(struct cc_drvdata *drvdata,
			      struct cc_crypto_req *cc_req,
			      struct cc_hw_desc *desc, unsigned int len,
			      bool add_comp, bool ivgen)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int used_sw_slots;
	unsigned int iv_seq_len = 0;
	unsigned int total_seq_len = len; /*initial sequence length*/
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	if (ivgen) {
		dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
			cc_req->ivgen_dma_addr_len,
			&cc_req->ivgen_dma_addr[0],
			&cc_req->ivgen_dma_addr[1],
			&cc_req->ivgen_dma_addr[2],
			cc_req->ivgen_size);

		/* Acquire IV from pool */
		rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
			       cc_req->ivgen_dma_addr_len,
			       cc_req->ivgen_size, iv_seq, &iv_seq_len);
		if (rc) {
			dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
			return rc;
		}

		total_seq_len += iv_seq_len;
	}

	used_sw_slots = ((req_mgr_h->req_queue_head -
			  req_mgr_h->req_queue_tail) &
			 (MAX_REQUEST_QUEUE_SIZE - 1));
	if (used_sw_slots > req_mgr_h->max_used_sw_slots)
		req_mgr_h->max_used_sw_slots = used_sw_slots;

	/* Enqueue request - must be locked with HW lock*/
	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
				    (MAX_REQUEST_QUEUE_SIZE - 1);
	/* TODO: Use circ_buf.h ? */

	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);

	/*
	 * We are about to push command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes
	 */
	wmb();

	/* STAT_PHASE_4: Push sequence */
	if (ivgen)
		enqueue_seq(drvdata, iv_seq, iv_seq_len);

	enqueue_seq(drvdata, desc, len);

	if (add_comp) {
		enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
		total_seq_len++;
	}

	if (req_mgr_h->q_free_slots < total_seq_len) {
		/* This situation should never occur. Maybe indicating problem
		 * with resuming power. Set the free slot count to 0 and hope
		 * for the best.
		 */
		dev_err(dev, "HW free slot count mismatch.");
		req_mgr_h->q_free_slots = 0;
	} else {
		/* Update the free slots in HW queue */
		req_mgr_h->q_free_slots -= total_seq_len;
	}

	/* Operation still in process */
	return -EINPROGRESS;
}
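/*
 * cc_do_send_request() never blocks: by the time it is called the caller
 * has already guaranteed (via cc_queues_status()) that both queues have
 * room, so it only snapshots *cc_req into the SW ring, pushes the
 * descriptors and returns -EINPROGRESS. The q_free_slots bookkeeping is a
 * cached view of the HW level; the mismatch branch above merely resets the
 * cache rather than failing the request.
 */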
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	spin_lock_bh(&mgr->bl_lock);
	list_add_tail(&bli->list, &mgr->backlog);
	++mgr->bl_len;
	spin_unlock_bh(&mgr->bl_lock);
	tasklet_schedule(&mgr->comptask);
}
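/*
 * Scheduling the completion tasklet in cc_enqueue_backlog() gives
 * cc_proc_backlog() an immediate first chance to push the new entry to the
 * HW instead of waiting for the next completion interrupt.
 */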
static void cc_proc_backlog(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	struct cc_bl_item *bli;
	struct cc_crypto_req *creq;
	struct crypto_async_request *req;
	bool ivgen;
	unsigned int total_len;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc;

	spin_lock(&mgr->bl_lock);

	while (mgr->bl_len) {
		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
		spin_unlock(&mgr->bl_lock);

		creq = &bli->creq;
		req = (struct crypto_async_request *)creq->user_arg;

		/*
		 * Notify the request we're moving out of the backlog
		 * but only if we haven't done so already.
		 */
		if (!bli->notif) {
			req->complete(req, -EINPROGRESS);
			bli->notif = true;
		}

		ivgen = !!creq->ivgen_dma_addr_len;
		total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);

		spin_lock(&mgr->hw_lock);

		rc = cc_queues_status(drvdata, mgr, total_len);
		if (rc) {
			/*
			 * There is still no room in the FIFO for
			 * this request. Bail out. We'll return here
			 * on the next completion irq.
			 */
			spin_unlock(&mgr->hw_lock);
			return;
		}

		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
					bli->len, false, ivgen);

		spin_unlock(&mgr->hw_lock);

		if (rc != -EINPROGRESS) {
			cc_pm_put_suspend(dev);
			creq->user_cb(dev, req, rc);
		}

		/* Remove ourselves from the backlog list */
		spin_lock(&mgr->bl_lock);
		list_del(&bli->list);
		--mgr->bl_len;
		kfree(bli);
	}

	spin_unlock(&mgr->bl_lock);
}
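/*
 * Backlog draining contract: each entry's originator is notified exactly
 * once with -EINPROGRESS (guarded by bli->notif) when it starts moving out
 * of the backlog, and the entry is unlinked and freed only after the send
 * attempt, so a full HW queue simply leaves it at the head of the list for
 * the next run.
 */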
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{
	int rc;
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	bool ivgen = !!cc_req->ivgen_dma_addr_len;
	unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
	struct device *dev = drvdata_to_dev(drvdata);
	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
	gfp_t flags = cc_gfp_flags(req);
	struct cc_bl_item *bli;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	spin_lock_bh(&mgr->hw_lock);
	rc = cc_queues_status(drvdata, mgr, total_len);

#ifdef CC_DEBUG_FORCE_BACKLOG
	if (backlog_ok)
		rc = -ENOSPC;
#endif /* CC_DEBUG_FORCE_BACKLOG */

	if (rc == -ENOSPC && backlog_ok) {
		spin_unlock_bh(&mgr->hw_lock);

		bli = kmalloc(sizeof(*bli), flags);
		if (!bli) {
			cc_pm_put_suspend(dev);
			return -ENOMEM;
		}

		memcpy(&bli->creq, cc_req, sizeof(*cc_req));
		memcpy(&bli->desc, desc, len * sizeof(*desc));
		bli->len = len;
		bli->notif = false;
		cc_enqueue_backlog(drvdata, bli);
		return -EBUSY;
	}

	if (!rc)
		rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
					ivgen);

	spin_unlock_bh(&mgr->hw_lock);
	return rc;
}
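/*
 * In cc_send_request() a full queue is not an error for callers that set
 * CRYPTO_TFM_REQ_MAY_BACKLOG: the request and its descriptors are copied
 * into a cc_bl_item and -EBUSY is returned, matching the usual crypto API
 * backlog convention of -EBUSY now, -EINPROGRESS when the request leaves
 * the backlog, and the final completion callback later.
 */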
int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{
	int rc;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	init_completion(&cc_req->seq_compl);
	cc_req->user_cb = request_mgr_complete;
	cc_req->user_arg = &cc_req->seq_compl;

	rc = cc_pm_get(dev);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
		return rc;
	}

	while (true) {
		spin_lock_bh(&mgr->hw_lock);
		rc = cc_queues_status(drvdata, mgr, len + 1);

		if (!rc)
			break;

		spin_unlock_bh(&mgr->hw_lock);
		if (rc != -ENOSPC) {
			cc_pm_put_suspend(dev);
			return rc;
		}
		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
		reinit_completion(&drvdata->hw_queue_avail);
	}

	rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
	spin_unlock_bh(&mgr->hw_lock);

	if (rc != -EINPROGRESS) {
		cc_pm_put_suspend(dev);
		return rc;
	}

	wait_for_completion(&cc_req->seq_compl);
	return 0;
}
/*
 * Enqueue caller request to crypto hardware during init process.
 * Assume this function is not called in the middle of a flow,
 * since we set QUEUE_LAST_IND flag in the last descriptor.
 *
 * \param drvdata
 * \param desc The crypto sequence
 * \param len The crypto sequence length
 *
 * \return int Returns "0" upon success
 */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{
	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
	unsigned int total_seq_len = len; /*initial sequence length*/
	int rc = 0;

	/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT.
	 */
	rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
	if (rc)
		return rc;

	set_queue_last_ind(drvdata, &desc[(len - 1)]);

	/*
	 * We are about to push command to the HW via the command registers
	 * that may reference host memory. We need to issue a memory barrier
	 * to make sure there are no outstanding memory writes
	 */
	wmb();
	enqueue_seq(drvdata, desc, len);

	/* Update the free slots in HW queue */
	req_mgr_h->q_free_slots =
		cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));

	return 0;
}
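/*
 * send_request_init() is the stripped-down path used while the driver is
 * still initializing: it bypasses the SW request queue, the PM get/put
 * pair and the completion callbacks entirely and provides no completion
 * notification, which is why it may only be used as described in the
 * comment above (no other flow in progress).
 */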
void complete_request(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;

	complete(&drvdata->hw_queue_avail);
#ifdef COMP_IN_WQ
	queue_delayed_work(request_mgr_handle->workq,
			   &request_mgr_handle->compwork, 0);
#else
	tasklet_schedule(&request_mgr_handle->comptask);
#endif
}
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata =
		container_of(work, struct cc_drvdata, compwork.work);

	comp_handler((unsigned long)drvdata);
}
#endif
static void proc_completions(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req *cc_req;
	struct device *dev = drvdata_to_dev(drvdata);
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	unsigned int *tail = &request_mgr_handle->req_queue_tail;
	unsigned int *head = &request_mgr_handle->req_queue_head;

	while (request_mgr_handle->axi_completed) {
		request_mgr_handle->axi_completed--;

		/* Dequeue request */
		if (*head == *tail) {
			/* We are supposed to handle a completion but our
			 * queue is empty. This is not normal. Return and
			 * hope for the best.
			 */
			dev_err(dev, "Request queue is empty head == tail %u\n",
				*head);
			break;
		}

		cc_req = &request_mgr_handle->req_queue[*tail];

		if (cc_req->user_cb)
			cc_req->user_cb(dev, cc_req->user_arg, 0);
		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
		dev_dbg(dev, "Request completed. axi_completed=%d\n",
			request_mgr_handle->axi_completed);
		cc_pm_put_suspend(dev);
	}
}
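/*
 * proc_completions() retires exactly one SW queue entry per counted
 * completion event, so the scheme relies on a one-to-one match between
 * what cc_axi_comp_count() reports and the requests that were enqueued;
 * the "queue is empty" error above is the symptom when that assumption
 * breaks.
 */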
static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
	return FIELD_GET(AXIM_MON_COMP_VALUE,
			 cc_ioread(drvdata, drvdata->axim_mon_offset));
}
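/*
 * cc_axi_comp_count() extracts the AXIM_MON_COMP_VALUE field from the AXI
 * monitor register at axim_mon_offset; comp_handler() below accumulates
 * successive readings into axi_completed instead of trusting any single
 * snapshot.
 */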
/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
	struct cc_req_mgr_handle *request_mgr_handle =
						drvdata->request_mgr_handle;
	u32 irq;

	irq = (drvdata->irq & CC_COMP_IRQ_MASK);

	if (irq & CC_COMP_IRQ_MASK) {
		/* To avoid the interrupt from firing as we unmask it,
		 * we clear it now
		 */
		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);

		/* Avoid race with above clear: Test completion counter
		 * once more
		 */
		request_mgr_handle->axi_completed +=
				cc_axi_comp_count(drvdata);

		while (request_mgr_handle->axi_completed) {
			do {
				proc_completions(drvdata);
				/* At this point (after proc_completions()),
				 * request_mgr_handle->axi_completed is 0.
				 */
				request_mgr_handle->axi_completed =
						cc_axi_comp_count(drvdata);
			} while (request_mgr_handle->axi_completed > 0);

			cc_iowrite(drvdata, CC_REG(HOST_ICR),
				   CC_COMP_IRQ_MASK);

			request_mgr_handle->axi_completed +=
					cc_axi_comp_count(drvdata);
		}
	}
	/* after verifying that there is nothing to do,
	 * unmask AXI completion interrupt
	 */
	cc_iowrite(drvdata, CC_REG(HOST_IMR),
		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);

	cc_proc_backlog(drvdata);
}