/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>

#include "compat.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"
struct jr_driver_data {
	/* List of Physical JobR's with the Driver */
	struct list_head	jr_list;
	spinlock_t		jr_alloc_lock;	/* jr_list lock */
} ____cacheline_aligned;

static struct jr_driver_data driver_data;
static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);

	return 0;
}
/*
 * Shutdown JobR independent of platform property code
 */
static int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	dma_addr_t inpbusaddr, outbusaddr;
	int ret;

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

	/* Free rings */
	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
			  jrp->inpring, inpbusaddr);
	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
			  jrp->outring, outbusaddr);
	kfree(jrp->entinfo);

	return ret;
}
static int caam_jr_remove(struct platform_device *pdev)
{
	int ret;
	struct device *jrdev;
	struct caam_drv_private_jr *jrpriv;

	jrdev = &pdev->dev;
	jrpriv = dev_get_drvdata(jrdev);

	/*
	 * Return EBUSY if job ring already allocated.
	 */
	if (atomic_read(&jrpriv->tfm_count)) {
		dev_err(jrdev, "Device is busy\n");
		return -EBUSY;
	}

	/* Remove the node from Physical JobR list maintained by driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	/* Release ring */
	ret = caam_jr_shutdown(jrdev);
	if (ret)
		dev_err(jrdev, "Failed to shut down job ring\n");
	irq_dispose_mapping(jrpriv->irq);

	return ret;
}
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If JobR error, we got more development work to do
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;

	while (rd_reg32(&jrp->rregs->outring_used)) {

		head = READ_ONCE(jrp->head);

		spin_lock(&jrp->outlock);

		sw_idx = tail = jrp->tail;
		hw_idx = jrp->out_ring_read_index;

		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			if (jrp->outring[hw_idx].desc ==
			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev,
				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = caam32_to_cpu(jrp->outring[hw_idx].jrstatus);

		/*
		 * Make sure all information from the job has been obtained
		 * before telling CAAM that the job has been removed from the
		 * output ring.
		 */
		mb();

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail. Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		spin_unlock(&jrp->outlock);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);
	}

	/* reenable / unmask IRQs */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
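/*
 * Worked example of the tail-advance rule in caam_jr_dequeue() above
 * (illustrative only; the indices are hypothetical): suppose head = 3
 * and tail = 0, and job 1 completes before job 0. Since sw_idx (1) !=
 * tail (0), the tail is left alone and entinfo[1].desc_addr_dma is
 * simply zeroed. When job 0 later completes, sw_idx == tail, so the
 * tail advances past slot 0 and keeps going past slot 1 as well,
 * because slot 1's desc_addr_dma was already marked 0. Both slots are
 * reclaimed in a single pass.
 */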
/**
 * caam_jr_alloc() - Allocate a job ring for someone to use as needed.
 *
 * returns :  pointer to the newly allocated physical
 *	      JobR dev if successful, ERR_PTR(-ENODEV) otherwise.
 **/
struct device *caam_jr_alloc(void)
{
	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
	struct device *dev = ERR_PTR(-ENODEV);
	int min_tfm_cnt	= INT_MAX;
	int tfm_cnt;

	spin_lock(&driver_data.jr_alloc_lock);

	if (list_empty(&driver_data.jr_list)) {
		spin_unlock(&driver_data.jr_alloc_lock);
		return ERR_PTR(-ENODEV);
	}

	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
		tfm_cnt = atomic_read(&jrpriv->tfm_count);
		if (tfm_cnt < min_tfm_cnt) {
			min_tfm_cnt = tfm_cnt;
			min_jrpriv = jrpriv;
		}
		if (!min_tfm_cnt)
			break;
	}

	if (min_jrpriv) {
		atomic_inc(&min_jrpriv->tfm_count);
		dev = min_jrpriv->dev;
	}
	spin_unlock(&driver_data.jr_alloc_lock);

	return dev;
}
EXPORT_SYMBOL(caam_jr_alloc);
/**
 * caam_jr_free() - Free the Job Ring
 * @rdev     - points to the dev that identifies the Job ring to
 *             be released.
 **/
void caam_jr_free(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);

	atomic_dec(&jrpriv->tfm_count);
}
EXPORT_SYMBOL(caam_jr_free);
/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been obtained earlier via caam_jr_alloc().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as
 *                 "desc" being argued to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file
 *        @areq:   optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = READ_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = cpu_to_caam_dma(desc_dma);

	/*
	 * Guarantee that the descriptor's DMA address has been written to
	 * the next slot in the ring before the write index is updated, since
	 * other cores may update this index independently.
	 */
	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/*
	 * Ensure that all job information has been written before
	 * notifying CAAM that a new job was added to the input ring.
	 */
	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_bh(&jrp->inplock);

	return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);
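/*
 * Illustrative sketch of the enqueue/callback contract documented above
 * (not part of this driver): a synchronous caller can park on a
 * completion that the job ring callback fires. The struct and function
 * names below are hypothetical; the descriptor is assumed to have been
 * built in DMA-able memory beforehand.
 */
struct example_result {
	struct completion completion;
	u32 status;
};

static void __maybe_unused example_done(struct device *dev, u32 *desc,
					u32 status, void *areq)
{
	struct example_result *res = areq;

	res->status = status;	/* untranslated CAAM status (JRSTA_*) */
	complete(&res->completion);
}

static int __maybe_unused example_run_job(struct device *jrdev, u32 *desc)
{
	struct example_result res;
	int ret;

	init_completion(&res.completion);
	res.status = 0;

	ret = caam_jr_enqueue(jrdev, desc, example_done, &res);
	if (ret)
		return ret;	/* -EBUSY: ring full; -EIO: couldn't map desc */

	wait_for_completion(&res.completion);
	return res.status ? -EIO : 0;
}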
/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

	/* Connect job ring interrupt handler. */
	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		goto out_kill_deq;
	}

	error = caam_reset_hw_jr(dev);
	if (error)
		goto out_free_irq;

	error = -ENOMEM;
	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
	if (!jrp->inpring)
		goto out_free_irq;

	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
	if (!jrp->outring)
		goto out_free_inpring;

	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
	if (!jrp->entinfo)
		goto out_free_outring;

	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	return 0;

out_free_outring:
	dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
			  jrp->outring, outbusaddr);
out_free_inpring:
	dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
			  jrp->inpring, inpbusaddr);
	dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
out_free_irq:
	free_irq(jrp->irq, dev);
out_kill_deq:
	tasklet_kill(&jrp->irqtask);
	return error;
}
/*
 * Probe routine for each detected JobR subsystem.
 */
static int caam_jr_probe(struct platform_device *pdev)
{
	struct device *jrdev;
	struct device_node *nprop;
	struct caam_job_ring __iomem *ctrl;
	struct caam_drv_private_jr *jrpriv;
	static int total_jobrs;
	int error;

	jrdev = &pdev->dev;
	jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
	if (!jrpriv)
		return -ENOMEM;

	dev_set_drvdata(jrdev, jrpriv);

	/* save ring identity relative to detection */
	jrpriv->ridx = total_jobrs++;

	nprop = pdev->dev.of_node;
	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = of_iomap(nprop, 0);
	if (!ctrl) {
		dev_err(jrdev, "of_iomap() failed\n");
		return -ENOMEM;
	}

	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;

	if (sizeof(dma_addr_t) == sizeof(u64)) {
		if (caam_dpaa2)
			error = dma_set_mask_and_coherent(jrdev,
							  DMA_BIT_MASK(49));
		else if (of_device_is_compatible(nprop,
						 "fsl,sec-v5.0-job-ring"))
			error = dma_set_mask_and_coherent(jrdev,
							  DMA_BIT_MASK(40));
		else
			error = dma_set_mask_and_coherent(jrdev,
							  DMA_BIT_MASK(36));
	} else {
		error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
	}
	if (error) {
		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
			error);
		iounmap(ctrl);
		return error;
	}

	/* Identify the interrupt */
	jrpriv->irq = irq_of_parse_and_map(nprop, 0);

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		irq_dispose_mapping(jrpriv->irq);
		iounmap(ctrl);
		return error;
	}

	jrpriv->dev = jrdev;
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	atomic_set(&jrpriv->tfm_count, 0);

	return 0;
}
static const struct of_device_id caam_jr_match[] = {
	{
		.compatible = "fsl,sec-v4.0-job-ring",
	},
	{
		.compatible = "fsl,sec4.0-job-ring",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);

static struct platform_driver caam_jr_driver = {
	.driver = {
		.name = "caam_jr",
		.of_match_table = caam_jr_match,
	},
	.probe       = caam_jr_probe,
	.remove      = caam_jr_remove,
};
static int __init jr_driver_init(void)
{
	spin_lock_init(&driver_data.jr_alloc_lock);
	INIT_LIST_HEAD(&driver_data.jr_list);
	return platform_driver_register(&caam_jr_driver);
}

static void __exit jr_driver_exit(void)
{
	platform_driver_unregister(&caam_jr_driver);
}

module_init(jr_driver_init);
module_exit(jr_driver_exit);
580 MODULE_LICENSE("GPL");
581 MODULE_DESCRIPTION("FSL CAAM JR request backend");
582 MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");