drivers/soc/ti/k3-ringacc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * TI K3 NAVSS Ring Accelerator subsystem driver
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>

static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);

#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK	GENMASK(19, 0)

/**
 * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
 *
 * @resv_16: Reserved
 * @db: Ring Doorbell Register
 * @resv_4: Reserved
 * @occ: Ring Occupancy Register
 * @indx: Ring Current Index Register
 * @hwocc: Ring Hardware Occupancy Register
 * @hwindx: Ring Hardware Current Index Register
 */
struct k3_ring_rt_regs {
        u32     resv_16[4];
        u32     db;
        u32     resv_4[1];
        u32     occ;
        u32     indx;
        u32     hwocc;
        u32     hwindx;
};

#define K3_RINGACC_RT_REGS_STEP	0x1000

/**
 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
 *
 * @head_data: Ring Head Entry Data Registers
 * @tail_data: Ring Tail Entry Data Registers
 * @peek_head_data: Ring Peek Head Entry Data Regs
 * @peek_tail_data: Ring Peek Tail Entry Data Regs
 */
struct k3_ring_fifo_regs {
        u32     head_data[128];
        u32     tail_data[128];
        u32     peek_head_data[128];
        u32     peek_tail_data[128];
};

/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 *
 * @revision: Revision Register
 * @config: Config Register
 */
struct k3_ringacc_proxy_gcfg_regs {
        u32     revision;
        u32     config;
};

#define K3_RINGACC_PROXY_CFG_THREADS_MASK	GENMASK(15, 0)

/**
 * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
 *
 * @control: Proxy Control Register
 * @status: Proxy Status Register
 * @resv_512: Reserved
 * @data: Proxy Data Register
 */
struct k3_ringacc_proxy_target_regs {
        u32     control;
        u32     status;
        u8      resv_512[504];
        u32     data[128];
};

#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
#define K3_RINGACC_PROXY_NOT_USED	(-1)

enum k3_ringacc_proxy_access_mode {
        PROXY_ACCESS_MODE_HEAD = 0,
        PROXY_ACCESS_MODE_TAIL = 1,
        PROXY_ACCESS_MODE_PEEK_HEAD = 2,
        PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};

#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES	(512U)
#define K3_RINGACC_FIFO_REGS_STEP	0x1000
#define K3_RINGACC_MAX_DB_RING_CNT	(127U)

struct k3_ring_ops {
        int (*push_tail)(struct k3_ring *ring, void *elm);
        int (*push_head)(struct k3_ring *ring, void *elm);
        int (*pop_tail)(struct k3_ring *ring, void *elm);
        int (*pop_head)(struct k3_ring *ring, void *elm);
};

/**
 * struct k3_ring - RA Ring descriptor
 *
 * @rt: Ring control/status registers
 * @fifos: Ring queues registers
 * @proxy: Ring Proxy Datapath registers
 * @ring_mem_dma: Ring buffer dma address
 * @ring_mem_virt: Ring buffer virt address
 * @ops: Ring operations
 * @size: Ring size in elements
 * @elm_size: Size of the ring element
 * @mode: Ring mode
 * @flags: flags
 * @free: Number of free elements
 * @occ: Ring occupancy
 * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
 * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
 * @ring_id: Ring Id
 * @parent: Pointer to the parent struct @k3_ringacc
 * @use_count: Use count for shared rings
 * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
 */
struct k3_ring {
        struct k3_ring_rt_regs __iomem *rt;
        struct k3_ring_fifo_regs __iomem *fifos;
        struct k3_ringacc_proxy_target_regs __iomem *proxy;
        dma_addr_t      ring_mem_dma;
        void            *ring_mem_virt;
        struct k3_ring_ops *ops;
        u32             size;
        enum k3_ring_size elm_size;
        enum k3_ring_mode mode;
        u32             flags;
#define K3_RING_FLAG_BUSY	BIT(1)
#define K3_RING_FLAG_SHARED	BIT(2)
        u32             free;
        u32             occ;
        u32             windex;
        u32             rindex;
        u32             ring_id;
        struct k3_ringacc *parent;
        u32             use_count;
        int             proxy_id;
};

/**
 * struct k3_ringacc - Rings accelerator descriptor
 *
 * @dev: pointer to RA device
 * @proxy_gcfg: RA proxy global config registers
 * @proxy_target_base: RA proxy datapath region
 * @num_rings: number of rings in RA
 * @rings_inuse: bitfield for ring usage tracking
 * @rm_gp_range: general purpose rings range from tisci
 * @dma_ring_reset_quirk: DMA reset workaround enable
 * @num_proxies: number of RA proxies
 * @proxy_inuse: bitfield for proxy usage tracking
 * @rings: array of rings descriptors (struct @k3_ring)
 * @list: list of RAs in the system
 * @req_lock: protect rings allocation
 * @tisci: pointer to ti-sci handle
 * @tisci_ring_ops: ti-sci rings ops
 * @tisci_dev_id: ti-sci device id
 */
struct k3_ringacc {
        struct device *dev;
        struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
        void __iomem *proxy_target_base;
        u32 num_rings; /* number of rings in Ringacc module */
        unsigned long *rings_inuse;
        struct ti_sci_resource *rm_gp_range;

        bool dma_ring_reset_quirk;
        u32 num_proxies;
        unsigned long *proxy_inuse;

        struct k3_ring *rings;
        struct list_head list;
        struct mutex req_lock; /* protect rings allocation */

        const struct ti_sci_handle *tisci;
        const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
        u32 tisci_dev_id;
};

static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
        return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
               (4 << ring->elm_size);
}

static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
{
        return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
}
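
/*
 * Editorial note (not part of the original driver): the element stride used
 * above is (4 << elm_size) bytes, i.e. elm_size encodes log2(bytes / 4).
 * Assuming K3_RINGACC_RING_ELSIZE_8 has the value 1, an 8-byte element at
 * idx == 3 lives at ring_mem_virt + 24, and the FIFO window position becomes
 * 512 - 8 == 504, which places the element at the tail end of the 512-byte
 * access window.
 */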

static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_ring_ops = {
                .push_tail = k3_ringacc_ring_push_mem,
                .pop_head = k3_ringacc_ring_pop_mem,
};

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_msg_ops = {
                .push_tail = k3_ringacc_ring_push_io,
                .push_head = k3_ringacc_ring_push_head_io,
                .pop_tail = k3_ringacc_ring_pop_tail_io,
                .pop_head = k3_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_proxy_ops = {
                .push_tail = k3_ringacc_ring_push_tail_proxy,
                .push_head = k3_ringacc_ring_push_head_proxy,
                .pop_tail = k3_ringacc_ring_pop_tail_proxy,
                .pop_head = k3_ringacc_ring_pop_head_proxy,
};

static void k3_ringacc_ring_dump(struct k3_ring *ring)
{
        struct device *dev = ring->parent->dev;

        dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
        dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
                &ring->ring_mem_dma);
        dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
                ring->elm_size, ring->size, ring->mode, ring->proxy_id);

        dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
        dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
        dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
        dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
        dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));

        if (ring->ring_mem_virt)
                print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
                                     16, 1, ring->ring_mem_virt, 16 * 8, false);
}

struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
                                        int id, u32 flags)
{
        int proxy_id = K3_RINGACC_PROXY_NOT_USED;

        mutex_lock(&ringacc->req_lock);

        if (id == K3_RINGACC_RING_ID_ANY) {
                /* Request for any general purpose ring */
                struct ti_sci_resource_desc *gp_rings =
                                                &ringacc->rm_gp_range->desc[0];
                unsigned long size;

                size = gp_rings->start + gp_rings->num;
                id = find_next_zero_bit(ringacc->rings_inuse, size,
                                        gp_rings->start);
                if (id == size)
                        goto error;
        } else if (id < 0) {
                goto error;
        }

        if (test_bit(id, ringacc->rings_inuse) &&
            !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
                goto error;
        else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
                goto out;

        if (flags & K3_RINGACC_RING_USE_PROXY) {
                proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
                                              ringacc->num_proxies, 0);
                if (proxy_id == ringacc->num_proxies)
                        goto error;
        }

        if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
                set_bit(proxy_id, ringacc->proxy_inuse);
                ringacc->rings[id].proxy_id = proxy_id;
                dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
                        proxy_id);
        } else {
                dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
        }

        set_bit(id, ringacc->rings_inuse);
out:
        ringacc->rings[id].use_count++;
        mutex_unlock(&ringacc->req_lock);
        return &ringacc->rings[id];

error:
        mutex_unlock(&ringacc->req_lock);
        return NULL;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
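
/*
 * Usage sketch (editorial addition, not part of the original file): a client
 * asks either for a specific ring id or for any free general purpose ring,
 * optionally driven through a proxy thread.  The call returns NULL on
 * failure; the ring id 10 below is only an example.
 *
 *	struct k3_ring *ring;
 *
 *	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
 *	if (!ring)
 *		return -EBUSY;
 *
 *	// or: a fixed ring id, accessed through a proxy
 *	ring = k3_ringacc_request_ring(ringacc, 10,
 *				       K3_RINGACC_RING_USE_PROXY);
 */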

static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
        struct k3_ringacc *ringacc = ring->parent;
        int ret;

        ret = ringacc->tisci_ring_ops->config(
                        ringacc->tisci,
                        TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
                        ringacc->tisci_dev_id,
                        ring->ring_id,
                        0,
                        0,
                        ring->size,
                        0,
                        0,
                        0);
        if (ret)
                dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
                        ret, ring->ring_id);
}

void k3_ringacc_ring_reset(struct k3_ring *ring)
{
        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return;

        ring->occ = 0;
        ring->free = 0;
        ring->rindex = 0;
        ring->windex = 0;

        k3_ringacc_ring_reset_sci(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);

static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
                                               enum k3_ring_mode mode)
{
        struct k3_ringacc *ringacc = ring->parent;
        int ret;

        ret = ringacc->tisci_ring_ops->config(
                        ringacc->tisci,
                        TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
                        ringacc->tisci_dev_id,
                        ring->ring_id,
                        0,
                        0,
                        0,
                        mode,
                        0,
                        0);
        if (ret)
                dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
                        ret, ring->ring_id);
}

void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
{
        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return;

        if (!ring->parent->dma_ring_reset_quirk)
                goto reset;

        if (!occ)
                occ = readl(&ring->rt->occ);

        if (occ) {
                u32 db_ring_cnt, db_ring_cnt_cur;

                dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
                        ring->ring_id, occ);

                /* TI-SCI ring reset */
                k3_ringacc_ring_reset_sci(ring);

                /*
                 * Setup the ring in ring/doorbell mode (if not already in this
                 * mode)
                 */
                if (ring->mode != K3_RINGACC_RING_MODE_RING)
                        k3_ringacc_ring_reconfig_qmode_sci(
                                        ring, K3_RINGACC_RING_MODE_RING);

                /*
                 * Ring the doorbell 2**22 - ringOcc times.
                 * This will wrap the internal UDMAP ring state occupancy
                 * counter (which is 21-bits wide) to 0.
                 */
                db_ring_cnt = (1U << 22) - occ;

                while (db_ring_cnt != 0) {
                        /*
                         * Ring the doorbell with the maximum count each
                         * iteration if possible to minimize the total
                         * number of writes
                         */
                        if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
                                db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
                        else
                                db_ring_cnt_cur = db_ring_cnt;

                        writel(db_ring_cnt_cur, &ring->rt->db);
                        db_ring_cnt -= db_ring_cnt_cur;
                }

                /* Restore the original ring mode (if not ring mode) */
                if (ring->mode != K3_RINGACC_RING_MODE_RING)
                        k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
        }

reset:
        /* Reset the ring */
        k3_ringacc_ring_reset(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
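
/*
 * Editorial worked example (derived from the code above, not from the
 * original source): with occ == 3 the loop issues 2^22 - 3 = 4194301 doorbell
 * increments, split into 33025 writes of 127 (K3_RINGACC_MAX_DB_RING_CNT)
 * plus one final write of 126.  Together with the 3 elements already counted,
 * the occupancy advances by exactly 2^22, a multiple of 2^21, so the 21-bit
 * UDMAP occupancy counter wraps back to 0.
 */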

static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
        struct k3_ringacc *ringacc = ring->parent;
        int ret;

        ret = ringacc->tisci_ring_ops->config(
                        ringacc->tisci,
                        TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
                        ringacc->tisci_dev_id,
                        ring->ring_id,
                        0,
                        0,
                        0,
                        0,
                        0,
                        0);
        if (ret)
                dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
                        ret, ring->ring_id);
}

int k3_ringacc_ring_free(struct k3_ring *ring)
{
        struct k3_ringacc *ringacc;

        if (!ring)
                return -EINVAL;

        ringacc = ring->parent;

        dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);

        if (!test_bit(ring->ring_id, ringacc->rings_inuse))
                return -EINVAL;

        mutex_lock(&ringacc->req_lock);

        if (--ring->use_count)
                goto out;

        if (!(ring->flags & K3_RING_FLAG_BUSY))
                goto no_init;

        k3_ringacc_ring_free_sci(ring);

        dma_free_coherent(ringacc->dev,
                          ring->size * (4 << ring->elm_size),
                          ring->ring_mem_virt, ring->ring_mem_dma);
        ring->flags = 0;
        ring->ops = NULL;
        if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
                clear_bit(ring->proxy_id, ringacc->proxy_inuse);
                ring->proxy = NULL;
                ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
        }

no_init:
        clear_bit(ring->ring_id, ringacc->rings_inuse);

out:
        mutex_unlock(&ringacc->req_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);

u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
{
        if (!ring)
                return -EINVAL;

        return ring->ring_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);

u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
{
        if (!ring)
                return -EINVAL;

        return ring->parent->tisci_dev_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);

int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
{
        int irq_num;

        if (!ring)
                return -EINVAL;

        irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
        if (irq_num <= 0)
                irq_num = -EINVAL;
        return irq_num;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
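
/*
 * Usage sketch (editorial addition; my_ring_irq_handler and my_data are
 * hypothetical client names): the virq returned above is requested like any
 * other Linux interrupt.
 *
 *	int irq, ret;
 *
 *	irq = k3_ringacc_get_ring_irq_num(ring);
 *	if (irq < 0)
 *		return irq;
 *	ret = request_irq(irq, my_ring_irq_handler, 0, "my-ring", my_data);
 */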

static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
        struct k3_ringacc *ringacc = ring->parent;
        u32 ring_idx;
        int ret;

        if (!ringacc->tisci)
                return -EINVAL;

        ring_idx = ring->ring_id;
        ret = ringacc->tisci_ring_ops->config(
                        ringacc->tisci,
                        TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
                        ringacc->tisci_dev_id,
                        ring_idx,
                        lower_32_bits(ring->ring_mem_dma),
                        upper_32_bits(ring->ring_mem_dma),
                        ring->size,
                        ring->mode,
                        ring->elm_size,
                        0);
        if (ret)
                dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
                        ret, ring_idx);

        return ret;
}

int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
        struct k3_ringacc *ringacc;
        int ret = 0;

        if (!ring || !cfg)
                return -EINVAL;

        ringacc = ring->parent;

        if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
            cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
            cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
            !test_bit(ring->ring_id, ringacc->rings_inuse))
                return -EINVAL;

        if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
            ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
            cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
                dev_err(ringacc->dev,
                        "Message mode must use proxy for %u element size\n",
                        4 << ring->elm_size);
                return -EINVAL;
        }

        /*
         * In case of shared ring only the first user (master user) can
         * configure the ring. The sequence should be used by the client:
         * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
         * k3_ringacc_ring_cfg(ring, cfg); # master configuration
         * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
         * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
         */
        if (ring->use_count != 1)
                return 0;

        ring->size = cfg->size;
        ring->elm_size = cfg->elm_size;
        ring->mode = cfg->mode;
        ring->occ = 0;
        ring->free = 0;
        ring->rindex = 0;
        ring->windex = 0;

        if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
                ring->proxy = ringacc->proxy_target_base +
                              ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

        switch (ring->mode) {
        case K3_RINGACC_RING_MODE_RING:
                ring->ops = &k3_ring_mode_ring_ops;
                break;
        case K3_RINGACC_RING_MODE_MESSAGE:
                if (ring->proxy)
                        ring->ops = &k3_ring_mode_proxy_ops;
                else
                        ring->ops = &k3_ring_mode_msg_ops;
                break;
        default:
                ring->ops = NULL;
                ret = -EINVAL;
                goto err_free_proxy;
        }

        ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
                                                 ring->size * (4 << ring->elm_size),
                                                 &ring->ring_mem_dma, GFP_KERNEL);
        if (!ring->ring_mem_virt) {
                dev_err(ringacc->dev, "Failed to alloc ring mem\n");
                ret = -ENOMEM;
                goto err_free_ops;
        }

        ret = k3_ringacc_ring_cfg_sci(ring);

        if (ret)
                goto err_free_mem;

        ring->flags |= K3_RING_FLAG_BUSY;
        ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
                        K3_RING_FLAG_SHARED : 0;

        k3_ringacc_ring_dump(ring);

        return 0;

err_free_mem:
        dma_free_coherent(ringacc->dev,
                          ring->size * (4 << ring->elm_size),
                          ring->ring_mem_virt,
                          ring->ring_mem_dma);
err_free_ops:
        ring->ops = NULL;
err_free_proxy:
        ring->proxy = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
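
/*
 * End-to-end usage sketch (editorial addition, not part of the original
 * driver; the 8-byte descriptor-address element mirrors how DMA clients
 * commonly use these rings, but is only an assumption here):
 *
 *	struct k3_ring_cfg cfg = {
 *		.size = 128,
 *		.elm_size = K3_RINGACC_RING_ELSIZE_8,
 *		.mode = K3_RINGACC_RING_MODE_RING,
 *		.flags = 0,
 *	};
 *	struct k3_ring *ring;
 *	u64 desc_dma = 0;	// hypothetical element payload
 *	int ret;
 *
 *	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
 *	if (!ring)
 *		return -EBUSY;
 *	ret = k3_ringacc_ring_cfg(ring, &cfg);
 *	if (ret)
 *		goto err_free;
 *
 *	ret = k3_ringacc_ring_push(ring, &desc_dma);
 *	...
 *	ret = k3_ringacc_ring_pop(ring, &desc_dma);
 *
 * err_free:
 *	k3_ringacc_ring_free(ring);
 *	return ret;
 */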

u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
{
        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return -EINVAL;

        return ring->size;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);

u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
{
        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return -EINVAL;

        if (!ring->free)
                ring->free = ring->size - readl(&ring->rt->occ);

        return ring->free;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);

u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
{
        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return -EINVAL;

        return readl(&ring->rt->occ);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);

u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
{
        return !k3_ringacc_ring_get_free(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);

enum k3_ringacc_access_mode {
        K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
        K3_RINGACC_ACCESS_MODE_POP_HEAD,
        K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
        K3_RINGACC_ACCESS_MODE_POP_TAIL,
        K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
        K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};

#define K3_RINGACC_PROXY_MODE(x)	(((x) & 0x3) << 16)
#define K3_RINGACC_PROXY_ELSIZE(x)	(((x) & 0x7) << 24)
static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
                                     enum k3_ringacc_proxy_access_mode mode)
{
        u32 val;

        val = ring->ring_id;
        val |= K3_RINGACC_PROXY_MODE(mode);
        val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
        writel(val, &ring->proxy->control);
        return 0;
}
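
/*
 * Editorial worked example (derived from the macros above, not from the
 * original source): the proxy control word packs the ring id in the low bits,
 * the access mode at bits 17:16 and the element size code at bits 26:24.
 * For ring_id == 10, PROXY_ACCESS_MODE_TAIL (1) and an elm_size code of 1
 * (8-byte elements) the value written is 0xa | (1 << 16) | (1 << 24), i.e.
 * 0x0101000a.
 */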

static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
                                        enum k3_ringacc_access_mode access_mode)
{
        void __iomem *ptr;

        ptr = (void __iomem *)&ring->proxy->data;

        switch (access_mode) {
        case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
        case K3_RINGACC_ACCESS_MODE_POP_HEAD:
                k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
                break;
        case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
        case K3_RINGACC_ACCESS_MODE_POP_TAIL:
                k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
                break;
        default:
                return -EINVAL;
        }

        ptr += k3_ringacc_ring_get_fifo_pos(ring);

        switch (access_mode) {
        case K3_RINGACC_ACCESS_MODE_POP_HEAD:
        case K3_RINGACC_ACCESS_MODE_POP_TAIL:
                dev_dbg(ring->parent->dev,
                        "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
                        access_mode);
                memcpy_fromio(elem, ptr, (4 << ring->elm_size));
                ring->occ--;
                break;
        case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
        case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
                dev_dbg(ring->parent->dev,
                        "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
                        access_mode);
                memcpy_toio(ptr, elem, (4 << ring->elm_size));
                ring->free--;
                break;
        default:
                return -EINVAL;
        }

        dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
                ring->occ);
        return 0;
}

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
{
        return k3_ringacc_ring_access_proxy(ring, elem,
                                            K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
{
        return k3_ringacc_ring_access_proxy(ring, elem,
                                            K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
{
        return k3_ringacc_ring_access_proxy(ring, elem,
                                            K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
{
        return k3_ringacc_ring_access_proxy(ring, elem,
                                            K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
                                     enum k3_ringacc_access_mode access_mode)
{
        void __iomem *ptr;

        switch (access_mode) {
        case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
        case K3_RINGACC_ACCESS_MODE_POP_HEAD:
                ptr = (void __iomem *)&ring->fifos->head_data;
                break;
        case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
        case K3_RINGACC_ACCESS_MODE_POP_TAIL:
                ptr = (void __iomem *)&ring->fifos->tail_data;
                break;
        default:
                return -EINVAL;
        }

        ptr += k3_ringacc_ring_get_fifo_pos(ring);

        switch (access_mode) {
        case K3_RINGACC_ACCESS_MODE_POP_HEAD:
        case K3_RINGACC_ACCESS_MODE_POP_TAIL:
                dev_dbg(ring->parent->dev,
                        "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
                        access_mode);
                memcpy_fromio(elem, ptr, (4 << ring->elm_size));
                ring->occ--;
                break;
        case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
        case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
                dev_dbg(ring->parent->dev,
                        "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
                        access_mode);
                memcpy_toio(ptr, elem, (4 << ring->elm_size));
                ring->free--;
                break;
        default:
                return -EINVAL;
        }

        dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
                ring->windex, ring->occ, ring->rindex);
        return 0;
}

static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
{
        return k3_ringacc_ring_access_io(ring, elem,
                                         K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
{
        return k3_ringacc_ring_access_io(ring, elem,
                                         K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
{
        return k3_ringacc_ring_access_io(ring, elem,
                                         K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
{
        return k3_ringacc_ring_access_io(ring, elem,
                                         K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
        void *elem_ptr;

        elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);

        memcpy(elem_ptr, elem, (4 << ring->elm_size));

        ring->windex = (ring->windex + 1) % ring->size;
        ring->free--;
        writel(1, &ring->rt->db);

        dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
                ring->free, ring->windex);

        return 0;
}

static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
{
        void *elem_ptr;

        elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);

        memcpy(elem, elem_ptr, (4 << ring->elm_size));

        ring->rindex = (ring->rindex + 1) % ring->size;
        ring->occ--;
        writel(-1, &ring->rt->db);

        dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
                ring->occ, ring->rindex, elem_ptr);
        return 0;
}
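
/*
 * Editorial note (not from the original source): in RING mode the element is
 * copied to/from the software-visible ring memory and the hardware is told
 * about it through the doorbell: a write of 1 accounts for one pushed
 * element, a write of -1 credits one consumed element.  With ring->size == 4,
 * for example, windex cycles 0, 1, 2, 3, 0, ... while each push is paired
 * with a +1 doorbell write and each pop with a -1 write.
 */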

int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
{
        int ret = -EOPNOTSUPP;

        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return -EINVAL;

        dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
                ring->windex);

        if (k3_ringacc_ring_is_full(ring))
                return -ENOMEM;

        if (ring->ops && ring->ops->push_tail)
                ret = ring->ops->push_tail(ring, elem);

        return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);

int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
{
        int ret = -EOPNOTSUPP;

        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return -EINVAL;

        dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
                ring->free, ring->windex);

        if (k3_ringacc_ring_is_full(ring))
                return -ENOMEM;

        if (ring->ops && ring->ops->push_head)
                ret = ring->ops->push_head(ring, elem);

        return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);

int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
{
        int ret = -EOPNOTSUPP;

        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return -EINVAL;

        if (!ring->occ)
                ring->occ = k3_ringacc_ring_get_occ(ring);

        dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
                ring->rindex);

        if (!ring->occ)
                return -ENODATA;

        if (ring->ops && ring->ops->pop_head)
                ret = ring->ops->pop_head(ring, elem);

        return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);

int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
{
        int ret = -EOPNOTSUPP;

        if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
                return -EINVAL;

        if (!ring->occ)
                ring->occ = k3_ringacc_ring_get_occ(ring);

        dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
                ring->rindex);

        if (!ring->occ)
                return -ENODATA;

        if (ring->ops && ring->ops->pop_tail)
                ret = ring->ops->pop_tail(ring, elem);

        return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);

struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
                                                const char *property)
{
        struct device_node *ringacc_np;
        struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
        struct k3_ringacc *entry;

        ringacc_np = of_parse_phandle(np, property, 0);
        if (!ringacc_np)
                return ERR_PTR(-ENODEV);

        mutex_lock(&k3_ringacc_list_lock);
        list_for_each_entry(entry, &k3_ringacc_list, list)
                if (entry->dev->of_node == ringacc_np) {
                        ringacc = entry;
                        break;
                }
        mutex_unlock(&k3_ringacc_list_lock);
        of_node_put(ringacc_np);

        return ringacc;
}
EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
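
/*
 * Usage sketch (editorial addition; the "ti,ringacc" property name is only an
 * example of what a client binding might use): a client driver resolves its
 * ring accelerator from a phandle in its own device tree node and defers
 * probing if the RA has not been registered yet.
 *
 *	struct k3_ringacc *ringacc;
 *
 *	ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
 *	if (IS_ERR(ringacc))
 *		return PTR_ERR(ringacc);	// may be -EPROBE_DEFER or -ENODEV
 */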

static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
{
        struct device_node *node = ringacc->dev->of_node;
        struct device *dev = ringacc->dev;
        struct platform_device *pdev = to_platform_device(dev);
        int ret;

        if (!node) {
                dev_err(dev, "device tree info unavailable\n");
                return -ENODEV;
        }

        ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
        if (ret) {
                dev_err(dev, "ti,num-rings read failure %d\n", ret);
                return ret;
        }

        ringacc->dma_ring_reset_quirk =
                        of_property_read_bool(node, "ti,dma-ring-reset-quirk");

        ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
        if (IS_ERR(ringacc->tisci)) {
                ret = PTR_ERR(ringacc->tisci);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "ti,sci read fail %d\n", ret);
                ringacc->tisci = NULL;
                return ret;
        }

        ret = of_property_read_u32(node, "ti,sci-dev-id",
                                   &ringacc->tisci_dev_id);
        if (ret) {
                dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
                return ret;
        }

        pdev->id = ringacc->tisci_dev_id;

        ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
                                                           ringacc->tisci_dev_id,
                                                           "ti,sci-rm-range-gp-rings");
        if (IS_ERR(ringacc->rm_gp_range)) {
                dev_err(dev, "Failed to allocate MSI interrupts\n");
                return PTR_ERR(ringacc->rm_gp_range);
        }

        return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
                                                 ringacc->rm_gp_range);
}

static int k3_ringacc_probe(struct platform_device *pdev)
{
        struct k3_ringacc *ringacc;
        void __iomem *base_fifo, *base_rt;
        struct device *dev = &pdev->dev;
        struct resource *res;
        int ret, i;

        ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
        if (!ringacc)
                return -ENOMEM;

        ringacc->dev = dev;
        mutex_init(&ringacc->req_lock);

        dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
                                            DOMAIN_BUS_TI_SCI_INTA_MSI);
        if (!dev->msi_domain) {
                dev_err(dev, "Failed to get MSI domain\n");
                return -EPROBE_DEFER;
        }

        ret = k3_ringacc_probe_dt(ringacc);
        if (ret)
                return ret;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
        base_rt = devm_ioremap_resource(dev, res);
        if (IS_ERR(base_rt))
                return PTR_ERR(base_rt);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
        base_fifo = devm_ioremap_resource(dev, res);
        if (IS_ERR(base_fifo))
                return PTR_ERR(base_fifo);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
        ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
        if (IS_ERR(ringacc->proxy_gcfg))
                return PTR_ERR(ringacc->proxy_gcfg);

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "proxy_target");
        ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(ringacc->proxy_target_base))
                return PTR_ERR(ringacc->proxy_target_base);

        ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
                               K3_RINGACC_PROXY_CFG_THREADS_MASK;

        ringacc->rings = devm_kzalloc(dev,
                                      sizeof(*ringacc->rings) *
                                      ringacc->num_rings,
                                      GFP_KERNEL);
        ringacc->rings_inuse = devm_kcalloc(dev,
                                            BITS_TO_LONGS(ringacc->num_rings),
                                            sizeof(unsigned long), GFP_KERNEL);
        ringacc->proxy_inuse = devm_kcalloc(dev,
                                            BITS_TO_LONGS(ringacc->num_proxies),
                                            sizeof(unsigned long), GFP_KERNEL);

        if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
                return -ENOMEM;

        for (i = 0; i < ringacc->num_rings; i++) {
                ringacc->rings[i].rt = base_rt +
                                       K3_RINGACC_RT_REGS_STEP * i;
                ringacc->rings[i].fifos = base_fifo +
                                          K3_RINGACC_FIFO_REGS_STEP * i;
                ringacc->rings[i].parent = ringacc;
                ringacc->rings[i].ring_id = i;
                ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
        }
        dev_set_drvdata(dev, ringacc);

        ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

        mutex_lock(&k3_ringacc_list_lock);
        list_add_tail(&ringacc->list, &k3_ringacc_list);
        mutex_unlock(&k3_ringacc_list_lock);

        dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
                 ringacc->num_rings,
                 ringacc->rm_gp_range->desc[0].start,
                 ringacc->rm_gp_range->desc[0].num,
                 ringacc->tisci_dev_id);
        dev_info(dev, "dma-ring-reset-quirk: %s\n",
                 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
        dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
                 readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
        return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
        { .compatible = "ti,am654-navss-ringacc", },
        {},
};

static struct platform_driver k3_ringacc_driver = {
        .probe          = k3_ringacc_probe,
        .driver         = {
                .name   = "k3-ringacc",
                .of_match_table = k3_ringacc_of_match,
                .suppress_bind_attrs = true,
        },
};
builtin_platform_driver(k3_ringacc_driver);