drivers/soc/fsl/dpio/dpio-service.c
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"
struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
	struct device *dev;
};
struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;    /* unaligned value from kmalloc() */
	unsigned int idx;      /* position of the next-to-be-returned entry */
	struct qbman_swp *swp; /* portal used to issue VDQCR */
	struct device *dev;    /* device used for DMA mapping */
};
/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);
static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;

	/*
	 * If cpu == -1, choose the current cpu, with no guarantees about
	 * potentially being migrated away.
	 */
	if (unlikely(cpu < 0))
		cpu = smp_processor_id();

	/* If a specific cpu was requested, pick it up immediately */
	return dpio_by_cpu[cpu];
}
static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}
/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
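
/*
 * Example (illustrative sketch only, not part of this driver): a consumer
 * that prefers the portal affined to a known cpu, falling back to any
 * available service. The example_get_io() name is hypothetical.
 *
 *	struct dpaa2_io *example_get_io(int cpu)
 *	{
 *		struct dpaa2_io *io;
 *
 *		io = dpaa2_io_service_select(cpu);
 *		if (!io)
 *			io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
 *		return io;
 *	}
 */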
/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
				 struct device *dev)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
	obj->swp = qbman_swp_init(&obj->swp_desc);

	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	obj->dev = dev;

	return obj;
}
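
/*
 * Example (hypothetical sketch): dpaa2_io_create() is typically called at
 * DPIO probe time, once the caller has filled a descriptor with the object's
 * attributes and mapped its portal regions. Only fields referenced by the
 * function above are shown; the elided setup is assumed done elsewhere.
 *
 *	struct dpaa2_io_desc desc = {
 *		.cpu = 0,	(or DPAA2_IO_ANY_CPU)
 *		.receives_notifications = 1,
 *		(regs_cena, regs_cinh, qman_version, dpio_id filled in
 *		 from the device's attributes)
 *	};
 *	struct dpaa2_io *io = dpaa2_io_create(&desc, dev);
 *
 *	if (!io)
 *		return -ENODEV;
 *	...
 *	dpaa2_io_down(io);	(on remove or error unwind)
 */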
/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
	spin_lock(&dpio_list_lock);
	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
	list_del(&d->node);
	spin_unlock(&dpio_list_lock);

	kfree(d);
}
#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}
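
/*
 * Example (hypothetical sketch): a DPIO device driver would register an irq
 * handler that simply forwards to dpaa2_io_irq() for the portal it owns.
 * The example_* names and the priv->io field are hypothetical.
 *
 *	static irqreturn_t example_dpio_irq_handler(int irq_num, void *arg)
 *	{
 *		struct device *dev = arg;
 *		struct example_dpio_priv *priv = dev_get_drvdata(dev);
 *
 *		return dpaa2_io_irq(priv->io);
 *	}
 */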
/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 *
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
	return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);
/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully. In that way:
 * (a) The DPIO service is "ready" to handle a notification arrival
 *     (which might happen before the "attach" command to MC has
 *     returned control of execution back to the caller)
 * (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *     'qman64' parameters that it should pass along in the MC command
 *     in order for the object to be configured to produce the right
 *     notification fields to the DPIO service.
 *
 * Return 0 for success, -ENODEV if no service is available, or -EINVAL if
 * the device link cannot be created.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx,
			      struct device *dev)
{
	struct device_link *link;
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
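
/*
 * Example (hypothetical sketch): registering a CDAN context before issuing
 * the MC attach command, per the (a)/(b) contract documented above. The
 * example_cdan_cb() name and the channel_id variable are hypothetical; the
 * ctx fields shown are the real ones consumed by this service.
 *
 *	static void example_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
 *	{
 *		(schedule NAPI or a workqueue here; do not block in the cb)
 *	}
 *
 *	ctx->is_cdan = 1;
 *	ctx->cb = example_cdan_cb;
 *	ctx->id = channel_id;
 *	ctx->desired_cpu = DPAA2_IO_ANY_CPU;
 *	err = dpaa2_io_service_register(NULL, ctx, dev);
 *	(on success, pass ctx->dpio_id and ctx->qman64 along in the MC
 *	 attach command so notifications reach this service)
 */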
/**
 * dpaa2_io_service_deregister - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *dev)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed". I.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN, which is
 * what this function achieves.
 *
 * Return 0 for success, or a negative error code on failure.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (unlikely(!d))
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
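
/*
 * Example (hypothetical sketch): a notification callback would typically
 * drain the source with pull dequeues and then rearm it so the hardware is
 * allowed to produce the next FQDAN/CDAN:
 *
 *	(drain the FQ/channel with dpaa2_io_service_pull_fq/_channel)
 *	err = dpaa2_io_service_rearm(NULL, ctx);
 *	if (unlikely(err))
 *		(log and handle the portal error)
 */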
/**
 * dpaa2_io_service_pull_fq() - pull dequeue frames from a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
			     struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_fq(&pd, fqid);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
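
/*
 * Example (hypothetical sketch): issuing a pull into a previously created
 * store. The command only starts the dequeue; the results are then walked
 * with dpaa2_io_store_next() (see the example after that function below).
 *
 *	err = dpaa2_io_service_pull_fq(NULL, fqid, store);
 *	if (err)
 *		(portal busy or no service available; retry or bail out)
 */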
/**
 * dpaa2_io_service_pull_channel() - pull dequeue frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
				u32 fqid,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
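
/*
 * Example (hypothetical sketch): the enqueue ring can be transiently full,
 * so callers commonly retry on -EBUSY a bounded number of times; the retry
 * count of 10 here is an arbitrary illustration, not a recommendation.
 *
 *	int i, err;
 *
 *	for (i = 0; i < 10; i++) {
 *		err = dpaa2_io_service_enqueue_fq(NULL, fqid, fd);
 *		if (err != -EBUSY)
 *			break;
 *	}
 *	if (err)
 *		(drop or requeue the frame)
 */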
/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u16 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * Eg. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u16 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
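
/*
 * Example (hypothetical sketch): a release/acquire round trip. The acquire
 * return value is a count, not just an error: it may legitimately be less
 * than requested, or zero when the pool is empty, so check it explicitly.
 *
 *	u64 bufs[7];
 *	int n, err;
 *
 *	(fill bufs[] with DMA addresses of free buffers, then seed the pool)
 *	err = dpaa2_io_service_release(NULL, bpid, bufs, 7);
 *	if (err)
 *		return err;
 *
 *	n = dpaa2_io_service_acquire(NULL, bpid, bufs, 7);
 *	if (n < 0)
 *		(the acquire command itself failed)
 *	else
 *		(n buffers were returned; the pool may have held fewer)
 */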
/*
 * 'Stores' are reusable memory blocks for holding dequeue results, and to
 * assist with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
 * @max_frames: the maximum number of dequeue results to be stored, must be <= 16.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 16))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
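
/*
 * Example (hypothetical sketch): creating a store sized for the maximum of
 * 16 results against the consumer's device (so the DMA mapping is attributed
 * correctly), and destroying it on teardown.
 *
 *	struct dpaa2_io_store *store;
 *
 *	store = dpaa2_io_store_create(16, dev);
 *	if (!store)
 *		return -ENOMEM;
 *	...
 *	dpaa2_io_store_destroy(store);
 */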
/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            result.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If we get an empty dequeue result to terminate a zero-results
		 * vdqcr, return NULL to the caller rather than expecting it to
		 * check non-NULL results every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		prefetch(&s->vaddr[s->idx]);
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
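
/*
 * Example (hypothetical sketch): the polling loop over a pull result,
 * using is_last to distinguish "hardware still writing" from "pull
 * finished". dpaa2_dq_fd() is assumed here as the accessor consumers use to
 * reach the frame descriptor of a valid result.
 *
 *	const struct dpaa2_dq *dq;
 *	int is_last = 0;
 *	int err;
 *
 *	err = dpaa2_io_service_pull_fq(NULL, fqid, store);
 *	if (err)
 *		return err;
 *
 *	while (!is_last) {
 *		dq = dpaa2_io_store_next(store, &is_last);
 *		if (!dq)
 *			continue;	(result not written yet, or the
 *					 dequeue terminated empty)
 *		(process dpaa2_dq_fd(dq))
 *	}
 */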
/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame- and byte-count are hereby returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
			    u32 *fcnt, u32 *bcnt)
{
	struct qbman_fq_query_np_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_fq_query_state(swp, fqid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*fcnt = qbman_fq_state_frame_count(&state);
	*bcnt = qbman_fq_state_byte_count(&state);

	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 *                             buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
	struct qbman_bp_query_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_bp_query(swp, bpid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*num = qbman_bp_info_num_free_bufs(&state);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
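
/*
 * Example (hypothetical sketch): the two query helpers above are handy in
 * debug paths, e.g. dumping occupancy before tearing a queue or pool down.
 *
 *	u32 fcnt = 0, bcnt = 0, nbufs = 0;
 *
 *	if (!dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt))
 *		pr_debug("fq %u: %u frames / %u bytes\n", fqid, fcnt, bcnt);
 *	if (!dpaa2_io_query_bp_count(NULL, bpid, &nbufs))
 *		pr_debug("bp %u: %u free buffers\n", bpid, nbufs);
 */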