// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2019 NXP
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>	/* for prefetch() in dpaa2_io_store_next() */

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
	struct device *dev;
};

struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;	/* unaligned value from kmalloc() */
	unsigned int idx;	/* position of the next-to-be-returned entry */
	struct qbman_swp *swp;	/* portal used to issue VDQCR */
	struct device *dev;	/* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;

	/*
	 * If cpu == -1, choose the current cpu, with no guarantees about
	 * potentially being migrated away.
	 */
	if (cpu < 0)
		cpu = smp_processor_id();

	/* If a specific cpu was requested, pick it up immediately */
	return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	d = service_select_by_cpu(d, -1);
	if (d)
		return d;

	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);

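/*
 * Usage sketch (illustrative only, not part of this driver): pick up the
 * service affine to the current CPU, falling back to any available instance.
 * The wrapper name is an assumption; it presumes the caller runs with
 * preemption disabled or can tolerate migration.
 */
static __maybe_unused struct dpaa2_io *example_get_io(void)
{
	/* prefer the portal affine to the CPU we are running on */
	struct dpaa2_io *io = dpaa2_io_service_select(smp_processor_id());

	if (!io)
		/* no affine portal; take the next available one */
		io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
	return io;
}
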
/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
				 struct device *dev)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
	obj->swp = qbman_swp_init(&obj->swp_desc);

	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	obj->dev = dev;

	return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
	spin_lock(&dpio_list_lock);
	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
	list_del(&d->node);
	spin_unlock(&dpio_list_lock);

	kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}

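/*
 * Wiring sketch (illustrative only): how an IRQ handler might hand off to
 * dpaa2_io_irq(). In the real driver this lives in dpio-driver.c; the
 * handler name and the use of the dpaa2_io as the dev_id cookie are
 * assumptions for illustration.
 */
static irqreturn_t __maybe_unused example_dpio_irq_handler(int irq_num,
							   void *arg)
{
	struct dpaa2_io *io = arg;	/* registered as the dev_id cookie */

	return dpaa2_io_irq(io);
}
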
/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object.
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
	return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully. In that way:
 *     (a) The DPIO service is "ready" to handle a notification arrival
 *         (which might happen before the "attach" command to MC has
 *         returned control of execution back to the caller)
 *     (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *         'qman64' parameters that it should pass along in the MC command
 *         in order for the object to be configured to produce the right
 *         notification fields to the DPIO service.
 *
 * Return 0 for success, or a negative error code (-ENODEV, -EINVAL) on
 * failure.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx,
			      struct device *dev)
{
	struct device_link *link;
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);

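/*
 * Registration sketch (illustrative only): how a consumer driver might set
 * up a CDAN notification context before attaching its object through the
 * MC. example_cdan_cb(), the channel id and the device pointer are
 * assumptions; the context must outlive the registration, hence the static.
 */
static void __maybe_unused example_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	/* typically schedules NAPI or a workqueue; empty in this sketch */
}

static int __maybe_unused example_register(struct device *dev, u32 ch_id)
{
	static struct dpaa2_io_notification_ctx ctx;

	ctx.is_cdan = 1;		/* channel (CDAN), not FQ (FQDAN) */
	ctx.id = ch_id;			/* channel index */
	ctx.desired_cpu = DPAA2_IO_ANY_CPU;
	ctx.cb = example_cdan_cb;

	/*
	 * After this succeeds, ctx.dpio_id and ctx.qman64 carry the values
	 * to pass along in the MC "attach" command.
	 */
	return dpaa2_io_service_register(NULL, &ctx, dev);
}
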
/**
 * dpaa2_io_service_deregister - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *dev)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed", i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is
 * what this function achieves.
 *
 * Return 0 for success.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (unlikely(!d))
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);

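/*
 * Rearm sketch (illustrative only): after draining a channel that produced
 * a CDAN, rearm it so it can notify again. "ctx" is the context registered
 * earlier; the function name is an assumption.
 */
static void __maybe_unused example_rearm(struct dpaa2_io_notification_ctx *ctx)
{
	int err = dpaa2_io_service_rearm(NULL, ctx);

	if (err)
		pr_err("fsl-mc-dpio: rearm failed: %d\n", err);
}
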
/**
 * dpaa2_io_service_pull_fq() - pull dequeue frames from a fq.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
			     struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_fq(&pd, fqid);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);

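/*
 * Pull sketch (illustrative only): issue a pull dequeue on a frame queue,
 * retrying while the portal's VDQCR is still busy (-EBUSY). The fqid and
 * the store are assumed to have been set up by the caller.
 */
static int __maybe_unused example_pull(struct dpaa2_io_store *store, u32 fqid)
{
	int err;

	do {
		err = dpaa2_io_service_pull_fq(NULL, fqid, store);
	} while (err == -EBUSY);	/* previous pull not yet consumed */

	return err;	/* 0 on success; results land in the store */
}
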
/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
				u32 fqid,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);

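/*
 * Enqueue sketch (illustrative only): the enqueue ring may momentarily be
 * full, in which case -EBUSY is returned and the caller decides whether to
 * retry. The bounded retry count here is an arbitrary assumption.
 */
static int __maybe_unused example_enqueue(u32 fqid, const struct dpaa2_fd *fd)
{
	int i, err = -EBUSY;

	for (i = 0; i < 10 && err == -EBUSY; i++)
		err = dpaa2_io_service_enqueue_fq(NULL, fqid, fd);

	return err;
}
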
/**
 * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
 * to a frame queue using one fqid.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptors which are enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
					 u32 fqid,
					 const struct dpaa2_fd *fd,
					 int nb)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);

/**
 * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queues using a list of fqids.
 * @d: the given DPIO service.
 * @fqid: the given list of frame queue ids.
 * @fd: the frame descriptors which are enqueued.
 * @nb: number of frames to be enqueued, must be <= 32.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
					      u32 *fqid,
					      const struct dpaa2_fd *fd,
					      int nb)
{
	struct qbman_eq_desc *ed;
	int i, ret;

	/* only 32 descriptors are allocated below */
	if (nb > 32)
		return -EINVAL;

	ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
	if (!ed)
		return -ENOMEM;

	d = service_select(d);
	if (!d) {
		ret = -ENODEV;
		goto out;
	}

	for (i = 0; i < nb; i++) {
		qbman_eq_desc_clear(&ed[i]);
		qbman_eq_desc_set_no_orp(&ed[i], 0);
		qbman_eq_desc_set_fq(&ed[i], fqid[i]);
	}

	ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
out:
	kfree(ed);
	return ret;
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

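/*
 * QD enqueue sketch (illustrative only): network drivers usually enqueue
 * through a queuing destination (QD) rather than a bare FQID, letting the
 * hardware pick the egress FQ from the bin and priority. All parameter
 * values below are assumptions.
 */
static int __maybe_unused example_enqueue_qd(const struct dpaa2_fd *fd)
{
	u32 qdid = 0;	/* from the object's attributes, e.g. a DPNI's QDID */
	u16 qdbin = 0;	/* typically the flow/queue index */
	u8 prio = 0;	/* traffic class */

	return dpaa2_io_service_enqueue_qd(NULL, qdid, prio, qdbin, fd);
}
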
/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u16 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u16 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);

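/*
 * Buffer pool sketch (illustrative only): seed a pool with DMA-mapped
 * buffer addresses and draw some back. Acquire returns the count obtained,
 * which may be less than requested; the bpid and addresses are assumptions.
 */
static void __maybe_unused example_bp_roundtrip(u16 bpid, const u64 *addrs)
{
	u64 got[7];
	int n;

	/* release seven buffer addresses into the pool */
	if (dpaa2_io_service_release(NULL, bpid, addrs, 7))
		return;

	/* acquire up to seven back; n is how many we actually got */
	n = dpaa2_io_service_acquire(NULL, bpid, got, 7);
	if (n < 0)
		pr_err("fsl-mc-dpio: acquire failed: %d\n", n);
}
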
/*
 * 'Stores' are reusable memory blocks for holding dequeue results, and to
 * assist with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
 * @max_frames: the maximum number of dequeue results for frames, must be <= 32.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 32))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	/* over-allocated above so the storage can be 64-byte aligned here */
	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

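/*
 * Store lifecycle sketch (illustrative only): a store sized for 16 dequeue
 * results backs one pull command at a time and is destroyed when done. The
 * device pointer and fqid are assumptions.
 */
static void __maybe_unused example_store_lifecycle(struct device *dev, u32 fqid)
{
	struct dpaa2_io_store *store = dpaa2_io_store_create(16, dev);

	if (!store)
		return;

	if (!dpaa2_io_service_pull_fq(NULL, fqid, store)) {
		/* ... consume all results via dpaa2_io_store_next() ... */
	}

	dpaa2_io_store_destroy(store);
}
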
/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 * result.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If we get an empty dequeue result to terminate a zero-results
		 * vdqcr, return NULL to the caller rather than expecting it to
		 * check non-NULL results every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		prefetch(&s->vaddr[s->idx]);
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);

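/*
 * Consume-loop sketch (illustrative only, modeled on how consumers drain a
 * store): spin on dpaa2_io_store_next() until is_last is set, skipping NULL
 * results that mean "still waiting" or "empty dequeue". dpaa2_dq_fd() comes
 * from dpaa2-global.h; the frame-processing hook is an assumption.
 */
static void __maybe_unused example_consume(struct dpaa2_io_store *store)
{
	const struct dpaa2_fd *fd;
	struct dpaa2_dq *dq;
	int is_last;

	do {
		dq = dpaa2_io_store_next(store, &is_last);
		if (!dq)
			continue;	/* result not yet written back */

		fd = dpaa2_dq_fd(dq);
		/* ... process the frame descriptor here ... */
		(void)fd;
	} while (!is_last);
}
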
/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame- and byte-count are hereby returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
			    u32 *fcnt, u32 *bcnt)
{
	struct qbman_fq_query_np_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_fq_query_state(swp, fqid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*fcnt = qbman_fq_state_frame_count(&state);
	*bcnt = qbman_fq_state_byte_count(&state);

	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);

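/*
 * Debug sketch (illustrative only): dump the instantaneous depth of a frame
 * queue, which is handy when diagnosing stalls. The fqid is an assumption.
 */
static void __maybe_unused example_dump_fq(u32 fqid)
{
	u32 fcnt = 0, bcnt = 0;

	if (!dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt))
		pr_info("fq %u: %u frames, %u bytes\n", fqid, fcnt, bcnt);
}
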
/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 * buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
	struct qbman_bp_query_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_bp_query(swp, bpid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*num = qbman_bp_info_num_free_bufs(&state);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);

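/*
 * Buffer pool occupancy sketch (illustrative only): drivers commonly use
 * this query to decide when to replenish a pool. The bpid and the watermark
 * are assumptions.
 */
static bool __maybe_unused example_bp_needs_refill(u16 bpid)
{
	u32 num = 0;

	if (dpaa2_io_query_bp_count(NULL, bpid, &num))
		return false;	/* query failed; assume no refill */

	return num < 64;	/* refill below an arbitrary watermark */
}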