// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 *
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
        struct dpaa2_io_desc dpio_desc;
        struct qbman_swp_desc swp_desc;
        struct qbman_swp *swp;
        struct list_head node;
        /* protect against multiple management commands */
        spinlock_t lock_mgmt_cmd;
        /* protect notifications list */
        spinlock_t lock_notifications;
        struct list_head notifications;
        struct device *dev;
};

struct dpaa2_io_store {
        unsigned int max;
        dma_addr_t paddr;
        struct dpaa2_dq *vaddr;
        void *alloced_addr;    /* unaligned value from kmalloc() */
        unsigned int idx;      /* position of the next-to-be-returned entry */
        struct qbman_swp *swp; /* portal used to issue VDQCR */
        struct device *dev;    /* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
                                                     int cpu)
{
        if (d)
                return d;

        if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
                return NULL;

        /*
         * If cpu == -1, choose the current cpu, with no guarantees about
         * potentially being migrated away.
         */
        if (unlikely(cpu < 0))
                cpu = smp_processor_id();

        /* If a specific cpu was requested, pick it up immediately */
        return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
        if (d)
                return d;

        spin_lock(&dpio_list_lock);
        d = list_entry(dpio_list.next, struct dpaa2_io, node);
        /* rotate the list so portal usage is spread across all DPIOs */
        list_del(&d->node);
        list_add_tail(&d->node, &dpio_list);
        spin_unlock(&dpio_list_lock);

        return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
        if (cpu == DPAA2_IO_ANY_CPU)
                return service_select(NULL);

        return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);

/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
                                 struct device *dev)
{
        struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;

        /* check if CPU is out of range (-1 means any cpu) */
        if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
                kfree(obj);
                return NULL;
        }

        obj->dpio_desc = *desc;
        obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
        obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
        obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
        obj->swp = qbman_swp_init(&obj->swp_desc);

        if (!obj->swp) {
                kfree(obj);
                return NULL;
        }

        INIT_LIST_HEAD(&obj->node);
        spin_lock_init(&obj->lock_mgmt_cmd);
        spin_lock_init(&obj->lock_notifications);
        INIT_LIST_HEAD(&obj->notifications);

        /* For now only enable DQRR interrupts */
        qbman_swp_interrupt_set_trigger(obj->swp,
                                        QBMAN_SWP_INTERRUPT_DQRI);
        qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
        if (obj->dpio_desc.receives_notifications)
                qbman_swp_push_set(obj->swp, 0, 1);

        spin_lock(&dpio_list_lock);
        list_add_tail(&obj->node, &dpio_list);
        if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
                dpio_by_cpu[desc->cpu] = obj;
        spin_unlock(&dpio_list_lock);

        obj->dev = dev;

        return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
        spin_lock(&dpio_list_lock);
        dpio_by_cpu[d->dpio_desc.cpu] = NULL;
        list_del(&d->node);
        spin_unlock(&dpio_list_lock);

        kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
        const struct dpaa2_dq *dq;
        int max = 0;
        struct qbman_swp *swp;
        u32 status;

        swp = obj->swp;
        status = qbman_swp_interrupt_read_status(swp);
        if (!status)
                return IRQ_NONE;

        dq = qbman_swp_dqrr_next(swp);
        while (dq) {
                if (qbman_result_is_SCN(dq)) {
                        struct dpaa2_io_notification_ctx *ctx;
                        u64 q64;

                        q64 = qbman_result_SCN_ctx(dq);
                        ctx = (void *)(uintptr_t)q64;
                        ctx->cb(ctx);
                } else {
                        pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
                }
                qbman_swp_dqrr_consume(swp, dq);
                ++max;
                if (max > DPAA_POLL_MAX)
                        goto done;
                dq = qbman_swp_dqrr_next(swp);
        }
done:
        qbman_swp_interrupt_clear_status(swp, status);
        qbman_swp_interrupt_set_inhibit(swp, 0);

        return IRQ_HANDLED;
}

/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 *
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object.
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
        return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully. In that way:
 *    (a) The DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller)
 *    (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *        'qman64' parameters that it should pass along in the MC command
 *        in order for the object to be configured to produce the right
 *        notification fields to the DPIO service.
 *
 * Return 0 for success, -ENODEV if there is no DPIO service, or -EINVAL if
 * the device link cannot be created.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
                              struct dpaa2_io_notification_ctx *ctx,
                              struct device *dev)
{
        struct device_link *link;
        unsigned long irqflags;

        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (!d)
                return -ENODEV;

        link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
                return -EINVAL;

        ctx->dpio_id = d->dpio_desc.dpio_id;
        ctx->qman64 = (u64)(uintptr_t)ctx;
        ctx->dpio_private = d;
        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_add(&ctx->node, &d->notifications);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);

        /* Enable the generation of CDAN notifications */
        if (ctx->is_cdan)
                return qbman_swp_CDAN_set_context_enable(d->swp,
                                                         (u16)ctx->id,
                                                         ctx->qman64);
        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);

/**
 * dpaa2_io_service_deregister - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
                                 struct dpaa2_io_notification_ctx *ctx,
                                 struct device *dev)
{
        struct dpaa2_io *d = ctx->dpio_private;
        unsigned long irqflags;

        if (ctx->is_cdan)
                qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

        spin_lock_irqsave(&d->lock_notifications, irqflags);
        list_del(&ctx->node);
        spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed". I.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is
 * what this function achieves.
 *
 * Return 0 for success, or a negative error code on failure.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
                           struct dpaa2_io_notification_ctx *ctx)
{
        unsigned long irqflags;
        int err;

        d = service_select_by_cpu(d, ctx->desired_cpu);
        if (!d)
                return -ENODEV;

        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        if (ctx->is_cdan)
                err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
        else
                err = qbman_swp_fq_schedule(d->swp, ctx->id);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);

/**
 * dpaa2_io_service_pull_fq() - pull dequeue frames from a fq.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
                             struct dpaa2_io_store *s)
{
        struct qbman_pull_desc pd;
        int err;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
        qbman_pull_desc_set_numframes(&pd, (u8)s->max);
        qbman_pull_desc_set_fq(&pd, fqid);

        d = service_select(d);
        if (!d)
                return -ENODEV;

        s->swp = d->swp;
        err = qbman_swp_pull(d->swp, &pd);
        if (err)
                s->swp = NULL;

        return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);

/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
                                  struct dpaa2_io_store *s)
{
        struct qbman_pull_desc pd;
        int err;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
        qbman_pull_desc_set_numframes(&pd, (u8)s->max);
        qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

        d = service_select(d);
        if (!d)
                return -ENODEV;

        s->swp = d->swp;
        err = qbman_swp_pull(d->swp, &pd);
        if (err)
                s->swp = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
                                u32 fqid,
                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_fq(&ed, fqid);

        return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
                                u32 qdid, u8 prio, u16 qdbin,
                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

        return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
                             u16 bpid,
                             const u64 *buffers,
                             unsigned int num_buffers)
{
        struct qbman_release_desc rd;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);

        return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
                             u16 bpid,
                             u64 *buffers,
                             unsigned int num_buffers)
{
        unsigned long irqflags;
        int err;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

        return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);

/*
 * 'Stores' are reusable memory blocks for holding dequeue results, and to
 * assist with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
 * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
                                             struct device *dev)
{
        struct dpaa2_io_store *ret;
        size_t size;

        if (!max_frames || (max_frames > 16))
                return NULL;

        ret = kmalloc(sizeof(*ret), GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->max = max_frames;
        size = max_frames * sizeof(struct dpaa2_dq) + 64;
        ret->alloced_addr = kzalloc(size, GFP_KERNEL);
        if (!ret->alloced_addr) {
                kfree(ret);
                return NULL;
        }

        /* align the dequeue storage to a 64-byte boundary */
        ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
        ret->paddr = dma_map_single(dev, ret->vaddr,
                                    sizeof(struct dpaa2_dq) * max_frames,
                                    DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, ret->paddr)) {
                kfree(ret->alloced_addr);
                kfree(ret);
                return NULL;
        }

        ret->idx = 0;
        ret->dev = dev;

        return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            result.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
        dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
                         DMA_FROM_DEVICE);
        kfree(s->alloced_addr);
        kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
        int match;
        struct dpaa2_dq *ret = &s->vaddr[s->idx];

        match = qbman_result_has_new_result(s->swp, ret);
        if (!match) {
                *is_last = 0;
                return NULL;
        }

        s->idx++;

        if (dpaa2_dq_is_pull_complete(ret)) {
                *is_last = 1;
                s->idx = 0;
                /*
                 * If we get an empty dequeue result to terminate a zero-results
                 * vdqcr, return NULL to the caller rather than expecting it to
                 * check non-NULL results every time.
                 */
                if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
                        ret = NULL;
        } else {
                prefetch(&s->vaddr[s->idx]);
                *is_last = 0;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);

/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame- and byte-count are hereby returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
                            u32 *fcnt, u32 *bcnt)
{
        struct qbman_fq_query_np_rslt state;
        struct qbman_swp *swp;
        unsigned long irqflags;
        int ret;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        swp = d->swp;
        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        ret = qbman_fq_query_state(swp, fqid, &state);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
        if (ret)
                return ret;
        *fcnt = qbman_fq_state_frame_count(&state);
        *bcnt = qbman_fq_state_byte_count(&state);

        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);

/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 *                             buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
        struct qbman_bp_query_rslt state;
        struct qbman_swp *swp;
        unsigned long irqflags;
        int ret;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        swp = d->swp;
        spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
        ret = qbman_bp_query(swp, bpid, &state);
        spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
        if (ret)
                return ret;
        *num = qbman_bp_info_num_free_bufs(&state);

        return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);