// SPDX-License-Identifier: GPL-2.0
/*
 * TI K3 NAVSS Ring Accelerator subsystem driver
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>

static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);

#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK	GENMASK(19, 0)

/**
 * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
 *
 * @resv_16: Reserved
 * @db: Ring Doorbell Register
 * @resv_4: Reserved
 * @occ: Ring Occupancy Register
 * @indx: Ring Current Index Register
 * @hwocc: Ring Hardware Occupancy Register
 * @hwindx: Ring Hardware Current Index Register
 */
struct k3_ring_rt_regs {
	u32	resv_16[4];
	u32	db;
	u32	resv_4[1];
	u32	occ;
	u32	indx;
	u32	hwocc;
	u32	hwindx;
};

#define K3_RINGACC_RT_REGS_STEP	0x1000

/**
 * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
 *
 * @head_data: Ring Head Entry Data Registers
 * @tail_data: Ring Tail Entry Data Registers
 * @peek_head_data: Ring Peek Head Entry Data Regs
 * @peek_tail_data: Ring Peek Tail Entry Data Regs
 */
struct k3_ring_fifo_regs {
	u32	head_data[128];
	u32	tail_data[128];
	u32	peek_head_data[128];
	u32	peek_tail_data[128];
};

/**
 * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region
 *
 * @revision: Revision Register
 * @config: Config Register
 */
struct k3_ringacc_proxy_gcfg_regs {
	u32	revision;
	u32	config;
};

#define K3_RINGACC_PROXY_CFG_THREADS_MASK	GENMASK(15, 0)

/**
 * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region
 *
 * @control: Proxy Control Register
 * @status: Proxy Status Register
 * @resv_512: Reserved
 * @data: Proxy Data Register
 */
struct k3_ringacc_proxy_target_regs {
	u32	control;
	u32	status;
	u8	resv_512[504];
	u32	data[128];
};

#define K3_RINGACC_PROXY_TARGET_STEP	0x1000
#define K3_RINGACC_PROXY_NOT_USED	(-1)

enum k3_ringacc_proxy_access_mode {
	PROXY_ACCESS_MODE_HEAD = 0,
	PROXY_ACCESS_MODE_TAIL = 1,
	PROXY_ACCESS_MODE_PEEK_HEAD = 2,
	PROXY_ACCESS_MODE_PEEK_TAIL = 3,
};

#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES	(512U)
#define K3_RINGACC_FIFO_REGS_STEP	0x1000
#define K3_RINGACC_MAX_DB_RING_CNT	(127U)

struct k3_ring_ops {
	int (*push_tail)(struct k3_ring *ring, void *elm);
	int (*push_head)(struct k3_ring *ring, void *elm);
	int (*pop_tail)(struct k3_ring *ring, void *elm);
	int (*pop_head)(struct k3_ring *ring, void *elm);
};

/**
 * struct k3_ring - RA Ring descriptor
 *
 * @rt: Ring control/status registers
 * @fifos: Ring queues registers
 * @proxy: Ring Proxy Datapath registers
 * @ring_mem_dma: Ring buffer dma address
 * @ring_mem_virt: Ring buffer virt address
 * @ops: Ring operations
 * @size: Ring size in elements
 * @elm_size: Size of the ring element
 * @mode: Ring mode
 * @flags: flags
 * @free: Number of free elements
 * @occ: Ring occupancy
 * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING)
 * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING)
 * @ring_id: Ring Id
 * @parent: Pointer on struct @k3_ringacc
 * @use_count: Use count for shared rings
 * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
 */
struct k3_ring {
	struct k3_ring_rt_regs __iomem *rt;
	struct k3_ring_fifo_regs __iomem *fifos;
	struct k3_ringacc_proxy_target_regs __iomem *proxy;
	dma_addr_t	ring_mem_dma;
	void		*ring_mem_virt;
	struct k3_ring_ops *ops;
	u32		size;
	enum k3_ring_size elm_size;
	enum k3_ring_mode mode;
	u32		flags;
#define K3_RING_FLAG_BUSY	BIT(1)
#define K3_RING_FLAG_SHARED	BIT(2)
	u32		free;
	u32		occ;
	u32		windex;
	u32		rindex;
	u32		ring_id;
	struct k3_ringacc	*parent;
	u32		use_count;
	int		proxy_id;
};

/**
 * struct k3_ringacc - Rings accelerator descriptor
 *
 * @dev: pointer on RA device
 * @proxy_gcfg: RA proxy global config registers
 * @proxy_target_base: RA proxy datapath region
 * @num_rings: number of rings in RA
 * @rings_inuse: bitfield for ring usage tracking
 * @rm_gp_range: general purpose rings range from tisci
 * @dma_ring_reset_quirk: DMA reset w/a enable
 * @num_proxies: number of RA proxies
 * @proxy_inuse: bitfield for proxy usage tracking
 * @rings: array of rings descriptors (struct @k3_ring)
 * @list: list of RAs in the system
 * @req_lock: protect rings allocation
 * @tisci: pointer ti-sci handle
 * @tisci_ring_ops: ti-sci rings ops
 * @tisci_dev_id: ti-sci device id
 */
struct k3_ringacc {
	struct device *dev;
	struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg;
	void __iomem *proxy_target_base;
	u32 num_rings; /* number of rings in Ringacc module */
	unsigned long *rings_inuse;
	struct ti_sci_resource *rm_gp_range;
	bool dma_ring_reset_quirk;
	u32 num_proxies;
	unsigned long *proxy_inuse;

	struct k3_ring *rings;
	struct list_head list;
	struct mutex req_lock; /* protect rings allocation */

	const struct ti_sci_handle *tisci;
	const struct ti_sci_rm_ringacc_ops *tisci_ring_ops;
	u32 tisci_dev_id;
};

static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
	return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
	       (4 << ring->elm_size);
}

static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
{
	return (ring->ring_mem_virt + idx * (4 << ring->elm_size));
}
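
/*
 * A worked example of the two helpers above, for orientation only,
 * assuming the usual encoding where K3_RINGACC_RING_ELSIZE_8 is 1: an
 * element then occupies (4 << 1) = 8 bytes, k3_ringacc_get_elm_addr(ring, 3)
 * points at byte offset 3 * 8 = 24 of the ring memory, and
 * k3_ringacc_ring_get_fifo_pos() returns 512 - 8 = 504, i.e. the element
 * is transferred through the last 8 bytes of the 512-byte FIFO window.
 */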

static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_ring_ops = {
		.push_tail = k3_ringacc_ring_push_mem,
		.pop_head = k3_ringacc_ring_pop_mem,
};

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_msg_ops = {
		.push_tail = k3_ringacc_ring_push_io,
		.push_head = k3_ringacc_ring_push_head_io,
		.pop_tail = k3_ringacc_ring_pop_tail_io,
		.pop_head = k3_ringacc_ring_pop_io,
};

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem);

static struct k3_ring_ops k3_ring_mode_proxy_ops = {
		.push_tail = k3_ringacc_ring_push_tail_proxy,
		.push_head = k3_ringacc_ring_push_head_proxy,
		.pop_tail = k3_ringacc_ring_pop_tail_proxy,
		.pop_head = k3_ringacc_ring_pop_head_proxy,
};

static void k3_ringacc_ring_dump(struct k3_ring *ring)
{
	struct device *dev = ring->parent->dev;

	dev_dbg(dev, "dump ring: %d\n", ring->ring_id);
	dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt,
		&ring->ring_mem_dma);
	dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n",
		ring->elm_size, ring->size, ring->mode, ring->proxy_id);

	dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db));
	dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ));
	dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx));
	dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc));
	dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx));

	if (ring->ring_mem_virt)
		print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE,
				     16, 1, ring->ring_mem_virt, 16 * 8, false);
}

struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
					int id, u32 flags)
{
	int proxy_id = K3_RINGACC_PROXY_NOT_USED;

	mutex_lock(&ringacc->req_lock);

	if (id == K3_RINGACC_RING_ID_ANY) {
		/* Request for any general purpose ring */
		struct ti_sci_resource_desc *gp_rings =
						&ringacc->rm_gp_range->desc[0];
		unsigned long size;

		size = gp_rings->start + gp_rings->num;
		id = find_next_zero_bit(ringacc->rings_inuse, size,
					gp_rings->start);
		if (id == size)
			goto error;
	} else if (id < 0) {
		goto error;
	}

	if (test_bit(id, ringacc->rings_inuse) &&
	    !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED))
		goto error;
	else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED)
		goto out;

	if (flags & K3_RINGACC_RING_USE_PROXY) {
		proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
					      ringacc->num_proxies, 0);
		if (proxy_id == ringacc->num_proxies)
			goto error;
	}

	if (proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		set_bit(proxy_id, ringacc->proxy_inuse);
		ringacc->rings[id].proxy_id = proxy_id;
		dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id,
			proxy_id);
	} else {
		dev_dbg(ringacc->dev, "Giving ring#%d\n", id);
	}

	set_bit(id, ringacc->rings_inuse);
out:
	ringacc->rings[id].use_count++;
	mutex_unlock(&ringacc->req_lock);
	return &ringacc->rings[id];

error:
	mutex_unlock(&ringacc->req_lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
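
/*
 * Illustrative request-side sketch (not taken from an in-tree user):
 *
 *	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
 *
 * picks any free general purpose ring, while
 *
 *	ring = k3_ringacc_request_ring(ringacc, 10,
 *				       K3_RINGACC_RING_USE_PROXY);
 *
 * requests a specific ring id (10 is arbitrary here) together with a free
 * proxy thread. Both return NULL on failure. A ring whose first (master)
 * user configured it with K3_RINGACC_RING_SHARED may be requested again
 * by further users; k3_ringacc_ring_free() drops one reference and only
 * the last free actually releases the ring.
 */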

static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			ring->size,
			0,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_ringacc_ring_reset(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	ring->occ = 0;
	ring->free = 0;
	ring->rindex = 0;
	ring->windex = 0;

	k3_ringacc_ring_reset_sci(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);

static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
					       enum k3_ring_mode mode)
{
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			0,
			mode,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return;

	if (!ring->parent->dma_ring_reset_quirk)
		goto reset;

	if (!occ)
		occ = readl(&ring->rt->occ);

	if (occ) {
		u32 db_ring_cnt, db_ring_cnt_cur;

		dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__,
			ring->ring_id, occ);

		/* TI-SCI ring reset */
		k3_ringacc_ring_reset_sci(ring);

		/*
		 * Setup the ring in ring/doorbell mode (if not already in
		 * this mode).
		 */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(
					ring, K3_RINGACC_RING_MODE_RING);

		/*
		 * Ring the doorbell 2**22 - ringOcc times.
		 * This will wrap the internal UDMAP ring state occupancy
		 * counter (which is 21-bits wide) to 0.
		 */
		db_ring_cnt = (1U << 22) - occ;
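
		/*
		 * Worked example of the wrap arithmetic above: with occ = 3
		 * the driver issues 2**22 - 3 = 4194301 doorbell increments,
		 * written in chunks of at most K3_RINGACC_MAX_DB_RING_CNT
		 * (127) per doorbell write in the loop below, so the
		 * internal occupancy counter ends up back at 0.
		 */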
		while (db_ring_cnt != 0) {
			/*
			 * Ring the doorbell with the maximum count each
			 * iteration if possible to minimize the total
			 * number of writes.
			 */
			if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT)
				db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT;
			else
				db_ring_cnt_cur = db_ring_cnt;

			writel(db_ring_cnt_cur, &ring->rt->db);
			db_ring_cnt -= db_ring_cnt_cur;
		}

		/* Restore the original ring mode (if not ring mode) */
		if (ring->mode != K3_RINGACC_RING_MODE_RING)
			k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode);
	}

reset:
	/* Reset the ring */
	k3_ringacc_ring_reset(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);

static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
	struct k3_ringacc *ringacc = ring->parent;
	int ret;

	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			ring->ring_id,
			0,
			0,
			0,
			0,
			0,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
			ret, ring->ring_id);
}

int k3_ringacc_ring_free(struct k3_ring *ring)
{
	struct k3_ringacc *ringacc;

	if (!ring)
		return -EINVAL;

	ringacc = ring->parent;

	dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);

	if (!test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	mutex_lock(&ringacc->req_lock);

	if (--ring->use_count)
		goto out;

	/* flush ring before releasing it */
	if (!(ring->flags & K3_RING_FLAG_BUSY))
		goto no_init;

	k3_ringacc_ring_free_sci(ring);

	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt, ring->ring_mem_dma);
	ring->flags = 0;
	ring->ops = NULL;
	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
		clear_bit(ring->proxy_id, ringacc->proxy_inuse);
		ring->proxy = NULL;
		ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}

no_init:
	clear_bit(ring->ring_id, ringacc->rings_inuse);

out:
	mutex_unlock(&ringacc->req_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_free);

u32 k3_ringacc_get_ring_id(struct k3_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->ring_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id);

u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring)
{
	if (!ring)
		return -EINVAL;

	return ring->parent->tisci_dev_id;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id);

int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
{
	int irq_num;

	if (!ring)
		return -EINVAL;

	irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
	if (irq_num <= 0)
		irq_num = -EINVAL;

	return irq_num;
}
EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);

static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
	struct k3_ringacc *ringacc = ring->parent;
	u32 ring_idx;
	int ret;

	if (!ringacc->tisci)
		return -EINVAL;

	ring_idx = ring->ring_id;
	ret = ringacc->tisci_ring_ops->config(
			ringacc->tisci,
			TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
			ringacc->tisci_dev_id,
			ring_idx,
			lower_32_bits(ring->ring_mem_dma),
			upper_32_bits(ring->ring_mem_dma),
			ring->size,
			ring->mode,
			ring->elm_size,
			0);
	if (ret)
		dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
			ret, ring_idx);

	return ret;
}

int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
	struct k3_ringacc *ringacc = ring->parent;
	int ret = 0;

	if (!ring || !cfg)
		return -EINVAL;
	if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
	    cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
	    cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
	    !test_bit(ring->ring_id, ringacc->rings_inuse))
		return -EINVAL;

	if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE &&
	    ring->proxy_id == K3_RINGACC_PROXY_NOT_USED &&
	    cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) {
		dev_err(ringacc->dev,
			"Message mode must use proxy for %u element size\n",
			4 << ring->elm_size);
		return -EINVAL;
	}

	/*
	 * In case of a shared ring only the first user (master user) can
	 * configure the ring. The sequence to be used by the client is:
	 * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user
	 * k3_ringacc_ring_cfg(ring, cfg); # master configuration
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED);
	 */
	if (ring->use_count != 1)
		return 0;

	ring->size = cfg->size;
	ring->elm_size = cfg->elm_size;
	ring->mode = cfg->mode;
	ring->occ = 0;
	ring->free = 0;
	ring->rindex = 0;
	ring->windex = 0;

	if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED)
		ring->proxy = ringacc->proxy_target_base +
			      ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP;

	switch (ring->mode) {
	case K3_RINGACC_RING_MODE_RING:
		ring->ops = &k3_ring_mode_ring_ops;
		break;
	case K3_RINGACC_RING_MODE_MESSAGE:
		if (ring->proxy)
			ring->ops = &k3_ring_mode_proxy_ops;
		else
			ring->ops = &k3_ring_mode_msg_ops;
		break;
	default:
		ring->ops = NULL;
		ret = -EINVAL;
		goto err_free_proxy;
	}

	ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
					ring->size * (4 << ring->elm_size),
					&ring->ring_mem_dma, GFP_KERNEL);
	if (!ring->ring_mem_virt) {
		dev_err(ringacc->dev, "Failed to alloc ring mem\n");
		ret = -ENOMEM;
		goto err_free_ops;
	}

	ret = k3_ringacc_ring_cfg_sci(ring);
	if (ret)
		goto err_free_mem;

	ring->flags |= K3_RING_FLAG_BUSY;
	ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ?
			K3_RING_FLAG_SHARED : 0;

	k3_ringacc_ring_dump(ring);

	return 0;

err_free_mem:
	dma_free_coherent(ringacc->dev,
			  ring->size * (4 << ring->elm_size),
			  ring->ring_mem_virt,
			  ring->ring_mem_dma);
err_free_ops:
	ring->ops = NULL;
err_free_proxy:
	ring->proxy = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg);
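
/*
 * Illustrative configuration sketch (the values are arbitrary, not from
 * an in-tree user): a message-mode ring with elements larger than 8 bytes
 * must have been requested with K3_RINGACC_RING_USE_PROXY, otherwise
 * k3_ringacc_ring_cfg() above rejects it:
 *
 *	struct k3_ring_cfg cfg = {
 *		.size = 32,
 *		.elm_size = K3_RINGACC_RING_ELSIZE_256,
 *		.mode = K3_RINGACC_RING_MODE_MESSAGE,
 *		.flags = K3_RINGACC_RING_SHARED,
 *	};
 *
 *	ring = k3_ringacc_request_ring(ringacc, ring_id,
 *				       K3_RINGACC_RING_USE_PROXY);
 *	ret = k3_ringacc_ring_cfg(ring, &cfg);
 */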

u32 k3_ringacc_ring_get_size(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	return ring->size;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size);

u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->free)
		ring->free = ring->size - readl(&ring->rt->occ);

	return ring->free;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free);

u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
{
	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	return readl(&ring->rt->occ);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);

u32 k3_ringacc_ring_is_full(struct k3_ring *ring)
{
	return !k3_ringacc_ring_get_free(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full);
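
/*
 * For orientation: with a 128-element ring whose hardware occupancy
 * register reads 5, k3_ringacc_ring_get_occ() returns 5 and
 * k3_ringacc_ring_get_free() caches and returns 128 - 5 = 123;
 * k3_ringacc_ring_is_full() is non-zero only when that difference is 0.
 */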

enum k3_ringacc_access_mode {
	K3_RINGACC_ACCESS_MODE_PUSH_HEAD,
	K3_RINGACC_ACCESS_MODE_POP_HEAD,
	K3_RINGACC_ACCESS_MODE_PUSH_TAIL,
	K3_RINGACC_ACCESS_MODE_POP_TAIL,
	K3_RINGACC_ACCESS_MODE_PEEK_HEAD,
	K3_RINGACC_ACCESS_MODE_PEEK_TAIL,
};

#define K3_RINGACC_PROXY_MODE(x)	(((x) & 0x3) << 16)
#define K3_RINGACC_PROXY_ELSIZE(x)	(((x) & 0x7) << 24)
static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring,
				     enum k3_ringacc_proxy_access_mode mode)
{
	u32 val;

	val = ring->ring_id;
	val |= K3_RINGACC_PROXY_MODE(mode);
	val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size);
	writel(val, &ring->proxy->control);
	return 0;
}

static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem,
					enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	ptr = (void __iomem *)&ring->proxy->data;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD);
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL);
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free,
		ring->occ);
	return 0;
}

static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_proxy(ring, elem,
					    K3_RINGACC_ACCESS_MODE_POP_TAIL);
}

static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem,
				     enum k3_ringacc_access_mode access_mode)
{
	void __iomem *ptr;

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
		ptr = (void __iomem *)&ring->fifos->head_data;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		ptr = (void __iomem *)&ring->fifos->tail_data;
		break;
	default:
		return -EINVAL;
	}

	ptr += k3_ringacc_ring_get_fifo_pos(ring);

	switch (access_mode) {
	case K3_RINGACC_ACCESS_MODE_POP_HEAD:
	case K3_RINGACC_ACCESS_MODE_POP_TAIL:
		dev_dbg(ring->parent->dev,
			"memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_fromio(elem, ptr, (4 << ring->elm_size));
		ring->occ--;
		break;
	case K3_RINGACC_ACCESS_MODE_PUSH_TAIL:
	case K3_RINGACC_ACCESS_MODE_PUSH_HEAD:
		dev_dbg(ring->parent->dev,
			"memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr,
			access_mode);
		memcpy_toio(ptr, elem, (4 << ring->elm_size));
		ring->free--;
		break;
	default:
		return -EINVAL;
	}

	dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free,
		ring->windex, ring->occ, ring->rindex);
	return 0;
}

static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_HEAD);
}

static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_PUSH_TAIL);
}

static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_HEAD);
}

static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
{
	return k3_ringacc_ring_access_io(ring, elem,
					 K3_RINGACC_ACCESS_MODE_POP_TAIL);
}

static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex);

	memcpy(elem_ptr, elem, (4 << ring->elm_size));

	ring->windex = (ring->windex + 1) % ring->size;
	ring->free--;
	writel(1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n",
		ring->free, ring->windex);

	return 0;
}

static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem)
{
	void *elem_ptr;

	elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex);

	memcpy(elem, elem_ptr, (4 << ring->elm_size));

	ring->rindex = (ring->rindex + 1) % ring->size;
	ring->occ--;
	writel(-1, &ring->rt->db);

	dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n",
		ring->occ, ring->rindex, elem_ptr);
	return 0;
}
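
/*
 * For orientation, a push/pop round trip in ring mode on a 16-element
 * ring with 8-byte elements: push_mem copies the element to byte offset
 * windex * 8, advances windex modulo 16 and writes +1 to the doorbell so
 * the hardware occupancy grows by one; pop_mem copies from rindex * 8,
 * advances rindex modulo 16 and writes -1 to the doorbell to consume the
 * element.
 */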

int k3_ringacc_ring_push(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free,
		ring->windex);

	if (k3_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_tail)
		ret = ring->ops->push_tail(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push);

int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n",
		ring->free, ring->windex);

	if (k3_ringacc_ring_is_full(ring))
		return -ENOMEM;

	if (ring->ops && ring->ops->push_head)
		ret = ring->ops->push_head(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head);

int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->occ)
		ring->occ = k3_ringacc_ring_get_occ(ring);

	dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ,
		ring->rindex);

	if (!ring->occ)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_head)
		ret = ring->ops->pop_head(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop);

int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
{
	int ret = -EOPNOTSUPP;

	if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
		return -EINVAL;

	if (!ring->occ)
		ring->occ = k3_ringacc_ring_get_occ(ring);

	dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ,
		ring->rindex);

	if (!ring->occ)
		return -ENODATA;

	if (ring->ops && ring->ops->pop_tail)
		ret = ring->ops->pop_tail(ring, elem);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail);

struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
						const char *property)
{
	struct device_node *ringacc_np;
	struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER);
	struct k3_ringacc *entry;

	ringacc_np = of_parse_phandle(np, property, 0);
	if (!ringacc_np)
		return ERR_PTR(-ENODEV);

	mutex_lock(&k3_ringacc_list_lock);
	list_for_each_entry(entry, &k3_ringacc_list, list)
		if (entry->dev->of_node == ringacc_np) {
			ringacc = entry;
			break;
		}
	mutex_unlock(&k3_ringacc_list_lock);
	of_node_put(ringacc_np);

	return ringacc;
}
EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle);
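
/*
 * Illustrative lookup sketch (the property name is hypothetical, it is
 * whatever phandle property the client binding defines):
 *
 *	ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
 *	if (IS_ERR(ringacc))
 *		return PTR_ERR(ringacc);
 *
 * The call returns ERR_PTR(-EPROBE_DEFER) while the referenced ring
 * accelerator has not been probed yet and ERR_PTR(-ENODEV) when the
 * phandle is absent.
 */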

static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc)
{
	struct device_node *node = ringacc->dev->of_node;
	struct device *dev = ringacc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings);
	if (ret) {
		dev_err(dev, "ti,num-rings read failure %d\n", ret);
		return ret;
	}

	ringacc->dma_ring_reset_quirk =
			of_property_read_bool(node, "ti,dma-ring-reset-quirk");

	ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci");
	if (IS_ERR(ringacc->tisci)) {
		ret = PTR_ERR(ringacc->tisci);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "ti,sci read fail %d\n", ret);
		ringacc->tisci = NULL;
		return ret;
	}

	ret = of_property_read_u32(node, "ti,sci-dev-id",
				   &ringacc->tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read fail %d\n", ret);
		return ret;
	}

	pdev->id = ringacc->tisci_dev_id;

	ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev,
						ringacc->tisci_dev_id,
						"ti,sci-rm-range-gp-rings");
	if (IS_ERR(ringacc->rm_gp_range)) {
		dev_err(dev, "Failed to allocate MSI interrupts\n");
		return PTR_ERR(ringacc->rm_gp_range);
	}

	return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev,
						 ringacc->rm_gp_range);
}
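
/*
 * Summary of the DT interface consumed above: "ti,num-rings" (ring
 * count), the optional "ti,dma-ring-reset-quirk" flag, the "ti,sci"
 * phandle plus "ti,sci-dev-id", and the "ti,sci-rm-range-gp-rings"
 * resource range used to obtain the general purpose rings and allocate
 * their MSI interrupts.
 */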

static int k3_ringacc_probe(struct platform_device *pdev)
{
	struct k3_ringacc *ringacc;
	void __iomem *base_fifo, *base_rt;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, i;

	ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
	if (!ringacc)
		return -ENOMEM;

	ringacc->dev = dev;
	mutex_init(&ringacc->req_lock);

	dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi_domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	ret = k3_ringacc_probe_dt(ringacc);
	if (ret)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt");
	base_rt = devm_ioremap_resource(dev, res);
	if (IS_ERR(base_rt))
		return PTR_ERR(base_rt);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos");
	base_fifo = devm_ioremap_resource(dev, res);
	if (IS_ERR(base_fifo))
		return PTR_ERR(base_fifo);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg");
	ringacc->proxy_gcfg = devm_ioremap_resource(dev, res);
	if (IS_ERR(ringacc->proxy_gcfg))
		return PTR_ERR(ringacc->proxy_gcfg);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "proxy_target");
	ringacc->proxy_target_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(ringacc->proxy_target_base))
		return PTR_ERR(ringacc->proxy_target_base);

	ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) &
			       K3_RINGACC_PROXY_CFG_THREADS_MASK;

	ringacc->rings = devm_kzalloc(dev,
				      sizeof(*ringacc->rings) *
				      ringacc->num_rings,
				      GFP_KERNEL);
	ringacc->rings_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_rings),
					    sizeof(unsigned long), GFP_KERNEL);
	ringacc->proxy_inuse = devm_kcalloc(dev,
					    BITS_TO_LONGS(ringacc->num_proxies),
					    sizeof(unsigned long), GFP_KERNEL);

	if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse)
		return -ENOMEM;

	for (i = 0; i < ringacc->num_rings; i++) {
		ringacc->rings[i].rt = base_rt +
				       K3_RINGACC_RT_REGS_STEP * i;
		ringacc->rings[i].fifos = base_fifo +
					  K3_RINGACC_FIFO_REGS_STEP * i;
		ringacc->rings[i].parent = ringacc;
		ringacc->rings[i].ring_id = i;
		ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED;
	}
	dev_set_drvdata(dev, ringacc);

	ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;

	mutex_lock(&k3_ringacc_list_lock);
	list_add_tail(&ringacc->list, &k3_ringacc_list);
	mutex_unlock(&k3_ringacc_list_lock);

	dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n",
		 ringacc->num_rings,
		 ringacc->rm_gp_range->desc[0].start,
		 ringacc->rm_gp_range->desc[0].num,
		 ringacc->tisci_dev_id);
	dev_info(dev, "dma-ring-reset-quirk: %s\n",
		 ringacc->dma_ring_reset_quirk ? "enabled" : "disabled");
	dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n",
		 readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies);
	return 0;
}

/* Match table for of_platform binding */
static const struct of_device_id k3_ringacc_of_match[] = {
	{ .compatible = "ti,am654-navss-ringacc", },
	{},
};

static struct platform_driver k3_ringacc_driver = {
	.probe		= k3_ringacc_probe,
	.driver		= {
		.name		= "k3-ringacc",
		.of_match_table = k3_ringacc_of_match,
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(k3_ringacc_driver);