// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
 * v1.x of the spec and v2.0 will likely be split out.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>

#include "hci.h"
#include "cmd.h"
#include "ibi.h"

/*
 * Software Parameter Values (somewhat arbitrary for now).
 * Some of them could be determined at run time eventually.
 */

#define XFER_RINGS			1	/* max: 8 */
#define XFER_RING_ENTRIES		16	/* max: 255 */

#define IBI_RINGS			1	/* max: 8 */
#define IBI_STATUS_RING_ENTRIES		32	/* max: 255 */
#define IBI_CHUNK_CACHELINES		1	/* max: 256 bytes equivalent */
#define IBI_CHUNK_POOL_SIZE		128	/* max: 1023 */

/*
 * Ring Header Preamble
 */

#define rhs_reg_read(r)		readl(hci->RHS_regs + (RHS_##r))
#define rhs_reg_write(r, v)	writel(v, hci->RHS_regs + (RHS_##r))

#define RHS_CONTROL			0x00
#define PREAMBLE_SIZE			GENMASK(31, 24)	/* Preamble Section Size */
#define HEADER_SIZE			GENMASK(23, 16)	/* Ring Header Size */
#define MAX_HEADER_COUNT_CAP		GENMASK(7, 4)	/* HC Max Header Count */
#define MAX_HEADER_COUNT		GENMASK(3, 0)	/* Driver Max Header Count */

#define RHS_RHn_OFFSET(n)		(0x04 + (n)*4)

/*
 * Ring Header (Per-Ring Bundle)
 */

#define rh_reg_read(r)		readl(rh->regs + (RH_##r))
#define rh_reg_write(r, v)	writel(v, rh->regs + (RH_##r))
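/*
 * Note: the rhs_reg_*() macros above and the rh_reg_*() macros here rely on
 * a suitably named local variable being in scope ('hci' resp. 'rh') and
 * paste the register name onto the RHS_/RH_ prefix, so for instance
 * rh_reg_read(CR_SETUP) expands to readl(rh->regs + RH_CR_SETUP).
 */
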
#define RH_CR_SETUP			0x00	/* Command/Response Ring */
#define CR_XFER_STRUCT_SIZE		GENMASK(31, 24)
#define CR_RESP_STRUCT_SIZE		GENMASK(23, 16)
#define CR_RING_SIZE			GENMASK(8, 0)

#define RH_IBI_SETUP			0x04
#define IBI_STATUS_STRUCT_SIZE		GENMASK(31, 24)
#define IBI_STATUS_RING_SIZE		GENMASK(23, 16)
#define IBI_DATA_CHUNK_SIZE		GENMASK(12, 10)
#define IBI_DATA_CHUNK_COUNT		GENMASK(9, 0)

#define RH_CHUNK_CONTROL		0x08

#define RH_INTR_STATUS			0x10
#define RH_INTR_STATUS_ENABLE		0x14
#define RH_INTR_SIGNAL_ENABLE		0x18
#define RH_INTR_FORCE			0x1c
#define INTR_IBI_READY			BIT(12)
#define INTR_TRANSFER_COMPLETION	BIT(11)
#define INTR_RING_OP			BIT(10)
#define INTR_TRANSFER_ERR		BIT(9)
#define INTR_WARN_INS_STOP_MODE		BIT(7)
#define INTR_IBI_RING_FULL		BIT(6)
#define INTR_TRANSFER_ABORT		BIT(5)

#define RH_RING_STATUS			0x20
#define RING_STATUS_LOCKED		BIT(3)
#define RING_STATUS_ABORTED		BIT(2)
#define RING_STATUS_RUNNING		BIT(1)
#define RING_STATUS_ENABLED		BIT(0)

#define RH_RING_CONTROL			0x24
#define RING_CTRL_ABORT			BIT(2)
#define RING_CTRL_RUN_STOP		BIT(1)
#define RING_CTRL_ENABLE		BIT(0)

#define RH_RING_OPERATION1		0x28
#define RING_OP1_IBI_DEQ_PTR		GENMASK(23, 16)
#define RING_OP1_CR_SW_DEQ_PTR		GENMASK(15, 8)
#define RING_OP1_CR_ENQ_PTR		GENMASK(7, 0)

#define RH_RING_OPERATION2		0x2c
#define RING_OP2_IBI_ENQ_PTR		GENMASK(23, 16)
#define RING_OP2_CR_DEQ_PTR		GENMASK(7, 0)

#define RH_CMD_RING_BASE_LO		0x30
#define RH_CMD_RING_BASE_HI		0x34
#define RH_RESP_RING_BASE_LO		0x38
#define RH_RESP_RING_BASE_HI		0x3c
#define RH_IBI_STATUS_RING_BASE_LO	0x40
#define RH_IBI_STATUS_RING_BASE_HI	0x44
#define RH_IBI_DATA_RING_BASE_LO	0x48
#define RH_IBI_DATA_RING_BASE_HI	0x4c

#define RH_CMD_RING_SG			0x50	/* Ring Scatter Gather Support */
#define RH_RESP_RING_SG			0x54
#define RH_IBI_STATUS_RING_SG		0x58
#define RH_IBI_DATA_RING_SG		0x5c
#define RING_SG_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define RING_SG_LIST_SIZE		GENMASK(15, 0)

/*
 * Data Buffer Descriptor (in memory)
 */

#define DATA_BUF_BLP			BIT(31)	/* Buffer Vs. List Pointer */
#define DATA_BUF_IOC			BIT(30)	/* Interrupt on Completion */
#define DATA_BUF_BLOCK_SIZE		GENMASK(15, 0)
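/*
 * The first word of this descriptor carries the data block size, plus the
 * IOC flag on the last entry of a batch; the 2nd and 3rd words carry the
 * 64-bit DMA address of the buffer. hci_dma_queue_xfer() below builds the
 * first word as, e.g.:
 *
 *	FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
 *		((i == n - 1) ? DATA_BUF_IOC : 0)
 *
 * The list-pointer (BLP) form is defined but not used by this code.
 */
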
struct hci_rh_data {
	void __iomem *regs;
	void *xfer, *resp, *ibi_status, *ibi_data;
	dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
	unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
	unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
	unsigned int done_ptr, ibi_chunk_ptr;
	struct hci_xfer **src_xfers;
	spinlock_t lock;
	struct completion op_done;
};

struct hci_rings_data {
	unsigned int total;
	struct hci_rh_data headers[];
};

struct hci_dma_dev_ibi_data {
	struct i3c_generic_ibi_pool *pool;
	unsigned int max_len;
};

static inline u32 lo32(dma_addr_t physaddr)
{
	return physaddr;
}

static inline u32 hi32(dma_addr_t physaddr)
{
	/* trickery to avoid compiler warnings on 32-bit build targets */
	if (sizeof(dma_addr_t) > 4) {
		u64 hi = physaddr;
		return hi >> 32;
	}
	return 0;
}
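/*
 * Ring base addresses are programmed as LO/HI register pairs, e.g.:
 *
 *	rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
 *	rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
 *
 * so these helpers simply split a dma_addr_t that may or may not be
 * 64 bits wide depending on the platform.
 */
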
static void hci_dma_cleanup(struct i3c_hci *hci)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i;

	if (!rings)
		return;

	for (i = 0; i < rings->total; i++) {
		rh = &rings->headers[i];

		rh_reg_write(RING_CONTROL, 0);
		rh_reg_write(CR_SETUP, 0);
		rh_reg_write(IBI_SETUP, 0);
		rh_reg_write(INTR_SIGNAL_ENABLE, 0);

		if (rh->xfer)
			dma_free_coherent(&hci->master.dev,
					  rh->xfer_struct_sz * rh->xfer_entries,
					  rh->xfer, rh->xfer_dma);
		if (rh->resp)
			dma_free_coherent(&hci->master.dev,
					  rh->resp_struct_sz * rh->xfer_entries,
					  rh->resp, rh->resp_dma);
		kfree(rh->src_xfers);
		if (rh->ibi_status)
			dma_free_coherent(&hci->master.dev,
					  rh->ibi_status_sz * rh->ibi_status_entries,
					  rh->ibi_status, rh->ibi_status_dma);
		if (rh->ibi_data_dma)
			dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
					 rh->ibi_chunk_sz * rh->ibi_chunks_total,
					 DMA_FROM_DEVICE);
		kfree(rh->ibi_data);
	}

	rhs_reg_write(CONTROL, 0);

	kfree(rings);
	hci->io_data = NULL;
}

static int hci_dma_init(struct i3c_hci *hci)
{
	struct hci_rings_data *rings;
	struct hci_rh_data *rh;
	u32 regval;
	unsigned int i, nr_rings, xfers_sz, resps_sz;
	unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
	int ret;

	regval = rhs_reg_read(CONTROL);
	nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
	dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
	if (unlikely(nr_rings > 8)) {
		dev_err(&hci->master.dev, "number of rings should be <= 8\n");
		nr_rings = 8;
	}
	if (nr_rings > XFER_RINGS)
		nr_rings = XFER_RINGS;
	rings = kzalloc(sizeof(*rings) + nr_rings * sizeof(*rh), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;
	hci->io_data = rings;
	rings->total = nr_rings;
	for (i = 0; i < rings->total; i++) {
		u32 offset = rhs_reg_read(RHn_OFFSET(i));

		dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
		ret = -EINVAL;
		if (!offset)
			goto err_out;
		rh = &rings->headers[i];
		rh->regs = hci->base_regs + offset;
		spin_lock_init(&rh->lock);
		init_completion(&rh->op_done);

		rh->xfer_entries = XFER_RING_ENTRIES;

		regval = rh_reg_read(CR_SETUP);
		rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
		rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
		DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
		    rh->xfer_struct_sz, rh->resp_struct_sz);
		xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
		resps_sz = rh->resp_struct_sz * rh->xfer_entries;

		rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
					      &rh->xfer_dma, GFP_KERNEL);
		rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
					      &rh->resp_dma, GFP_KERNEL);
		rh->src_xfers =
			kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
				      GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->xfer || !rh->resp || !rh->src_xfers)
			goto err_out;

		rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
		rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
		rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
		rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));

		regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
		rh_reg_write(CR_SETUP, regval);

		rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
		rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
						 INTR_TRANSFER_COMPLETION |
						 INTR_RING_OP |
						 INTR_TRANSFER_ERR |
						 INTR_WARN_INS_STOP_MODE |
						 INTR_IBI_RING_FULL |
						 INTR_TRANSFER_ABORT);

		/* IBI support is only set up on the first IBI_RINGS rings */
		if (i >= IBI_RINGS)
			goto ring_ready;

		regval = rh_reg_read(IBI_SETUP);
		rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
		rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
		rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;

		rh->ibi_chunk_sz = dma_get_cache_alignment();
		rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
		BUG_ON(rh->ibi_chunk_sz > 256);
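		/*
		 * The IBI_DATA_CHUNK_SIZE field is only 3 bits wide and
		 * encodes the chunk size as a power of two, programmed
		 * below as ilog2(chunk_sz) - 2; e.g. 64-byte cache lines
		 * with IBI_CHUNK_CACHELINES == 1 give an encoding of 4.
		 * The chunk size is capped at 256 bytes here to match the
		 * IBI_CHUNK_CACHELINES limit noted above.
		 */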
		ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
		ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;

		rh->ibi_status =
			dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
					   &rh->ibi_status_dma, GFP_KERNEL);
		rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
		ret = -ENOMEM;
		if (!rh->ibi_status || !rh->ibi_data)
			goto err_out;
		rh->ibi_data_dma =
			dma_map_single(&hci->master.dev, rh->ibi_data,
				       ibi_data_ring_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
			rh->ibi_data_dma = 0;
			ret = -ENOMEM;
			goto err_out;
		}

		regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
				    rh->ibi_status_entries) |
			 FIELD_PREP(IBI_DATA_CHUNK_SIZE,
				    ilog2(rh->ibi_chunk_sz) - 2) |
			 FIELD_PREP(IBI_DATA_CHUNK_COUNT,
				    rh->ibi_chunks_total);
		rh_reg_write(IBI_SETUP, regval);
		regval = rh_reg_read(INTR_SIGNAL_ENABLE);
		regval |= INTR_IBI_READY;
		rh_reg_write(INTR_SIGNAL_ENABLE, regval);

ring_ready:
		rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
	}

	regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
	rhs_reg_write(CONTROL, regval);
	return 0;

err_out:
	hci_dma_cleanup(hci);
	return ret;
}

static void hci_dma_unmap_xfer(struct i3c_hci *hci,
			       struct hci_xfer *xfer_list, unsigned int n)
{
	struct hci_xfer *xfer;
	unsigned int i;

	for (i = 0; i < n; i++) {
		xfer = xfer_list + i;
		dma_unmap_single(&hci->master.dev,
				 xfer->data_dma, xfer->data_len,
				 xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int hci_dma_queue_xfer(struct i3c_hci *hci,
			      struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh;
	unsigned int i, ring, enqueue_ptr;
	u32 op1_val, op2_val;

	/* For now we only use ring 0 */
	ring = 0;
	rh = &rings->headers[ring];

	op1_val = rh_reg_read(RING_OPERATION1);
	enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;

		/* store cmd descriptor */
		*ring_data++ = xfer->cmd_desc[0];
		*ring_data++ = xfer->cmd_desc[1];
		if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
			*ring_data++ = xfer->cmd_desc[2];
			*ring_data++ = xfer->cmd_desc[3];
		}

		/* first word of Data Buffer Descriptor Structure */
		if (!xfer->data)
			xfer->data_len = 0;
		*ring_data++ =
			FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
			((i == n - 1) ? DATA_BUF_IOC : 0);

		/* 2nd and 3rd words of Data Buffer Descriptor Structure */
		if (xfer->data) {
			xfer->data_dma =
				dma_map_single(&hci->master.dev,
					       xfer->data,
					       xfer->data_len,
					       xfer->rnw ?
						  DMA_FROM_DEVICE :
						  DMA_TO_DEVICE);
			if (dma_mapping_error(&hci->master.dev,
					      xfer->data_dma)) {
				hci_dma_unmap_xfer(hci, xfer_list, i);
				return -ENOMEM;
			}
			*ring_data++ = lo32(xfer->data_dma);
			*ring_data++ = hi32(xfer->data_dma);
		} else {
			*ring_data++ = 0;
			*ring_data++ = 0;
		}

		/* remember corresponding xfer struct */
		rh->src_xfers[enqueue_ptr] = xfer;
		/* remember corresponding ring/entry for this xfer structure */
		xfer->ring_number = ring;
		xfer->ring_entry = enqueue_ptr;

		enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;

		/*
		 * We may update the hardware view of the enqueue pointer
		 * only if we didn't reach its dequeue pointer.
		 */
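		/*
		 * That is, if the incremented enqueue pointer would land on
		 * the hardware dequeue pointer the ring is treated as full:
		 * one slot is always left unused, presumably so that
		 * enqueue == dequeue can still be read as "ring empty".
		 */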
		op2_val = rh_reg_read(RING_OPERATION2);
		if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
			/* the ring is full */
			hci_dma_unmap_xfer(hci, xfer_list, i + 1);
			return -EBUSY;
		}
	}

	/* take care to update the hardware enqueue pointer atomically */
	spin_lock_irq(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_ENQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock_irq(&rh->lock);

	return 0;
}

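/*
 * Abort outstanding transfers: stop the ring, wait for the RING_OP
 * completion signaled from hci_dma_irq_handler(), replace any descriptor
 * that has not completed yet with a no-op, then restart the ring.
 * Returns true if at least one transfer was actually unqueued.
 */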
static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
				 struct hci_xfer *xfer_list, int n)
{
	struct hci_rings_data *rings = hci->io_data;
	struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
	int i;
	bool did_unqueue = false;

	/* stop the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
	if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
		/*
		 * We're deep in it if this condition is ever met.
		 * Hardware might still be writing to memory, etc.
		 * Better suspend the world than risking silent corruption.
		 */
		dev_crit(&hci->master.dev, "unable to abort the ring\n");
		BUG();
	}

	for (i = 0; i < n; i++) {
		struct hci_xfer *xfer = xfer_list + i;
		int idx = xfer->ring_entry;

		/*
		 * At the time the abort happened, the xfer might have
		 * completed already. If not then replace corresponding
		 * descriptor entries with a no-op.
		 */
		if (idx >= 0) {
			u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;

			/* store no-op cmd descriptor */
			*ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
			*ring_data++ = 0;
			if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
				*ring_data++ = 0;
				*ring_data++ = 0;
			}

			/* disassociate this xfer struct */
			rh->src_xfers[idx] = NULL;

			hci_dma_unmap_xfer(hci, xfer, 1);

			did_unqueue = true;
		}
	}

	/* restart the ring */
	rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);

	return did_unqueue;
}

static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	u32 op1_val, op2_val, resp, *ring_resp;
	unsigned int tid, done_ptr = rh->done_ptr;
	struct hci_xfer *xfer;

	for (;;) {
		op2_val = rh_reg_read(RING_OPERATION2);
		if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
			break;

		ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
		resp = *ring_resp;
		tid = RESP_TID(resp);
		DBG("resp = 0x%08x", resp);

		xfer = rh->src_xfers[done_ptr];
		if (!xfer) {
			DBG("orphaned ring entry");
		} else {
			hci_dma_unmap_xfer(hci, xfer, 1);
			xfer->ring_entry = -1;
			xfer->response = resp;
			if (tid != xfer->cmd_tid) {
				dev_err(&hci->master.dev,
					"response tid=%d when expecting %d\n",
					tid, xfer->cmd_tid);
				/* TODO: do something about it? */
			}
			if (xfer->completion)
				complete(xfer->completion);
		}

		done_ptr = (done_ptr + 1) % rh->xfer_entries;
		rh->done_ptr = done_ptr;
	}

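	/*
	 * This runs from hard interrupt context (see hci_dma_irq_handler()),
	 * so a plain spin_lock() is enough here; the enqueue side in
	 * hci_dma_queue_xfer() takes the same lock with spin_lock_irq().
	 */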
	/* take care to update the software dequeue pointer atomically */
	spin_lock(&rh->lock);
	op1_val = rh_reg_read(RING_OPERATION1);
	op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
	op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
	rh_reg_write(RING_OPERATION1, op1_val);
	spin_unlock(&rh->lock);
}

static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct i3c_generic_ibi_pool *pool;
	struct hci_dma_dev_ibi_data *dev_ibi;

	dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
	if (!dev_ibi)
		return -ENOMEM;
	pool = i3c_generic_ibi_alloc_pool(dev, req);
	if (IS_ERR(pool)) {
		kfree(dev_ibi);
		return PTR_ERR(pool);
	}
	dev_ibi->pool = pool;
	dev_ibi->max_len = req->max_payload_len;
	dev_data->ibi_data = dev_ibi;
	return 0;
}

static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	dev_data->ibi_data = NULL;
	i3c_generic_ibi_free_pool(dev_ibi->pool);
	kfree(dev_ibi);
}

static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
				     struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;

	i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
}

static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
	struct i3c_dev_desc *dev;
	struct i3c_hci_dev_data *dev_data;
	struct hci_dma_dev_ibi_data *dev_ibi;
	struct i3c_ibi_slot *slot;
	u32 op1_val, op2_val, ibi_status_error;
	unsigned int ptr, enq_ptr, deq_ptr;
	unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
	int ibi_addr, last_ptr;
	void *ring_ibi_data;
	dma_addr_t ring_ibi_data_dma;

	op1_val = rh_reg_read(RING_OPERATION1);
	deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);

	op2_val = rh_reg_read(RING_OPERATION2);
	enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);

	ibi_status_error = 0;
	ibi_size = 0;
	ibi_chunks = 0;
	ibi_addr = -1;
	last_ptr = -1;

	/* let's find all we can about this IBI */
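	/*
	 * Each IBI status entry covers IBI_CHUNKS data chunks; only the entry
	 * flagged IBI_LAST_STATUS reports the exact byte count of its portion
	 * in IBI_DATA_LENGTH, the earlier entries contribute whole chunks of
	 * rh->ibi_chunk_sz bytes to the payload size.
	 */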
	for (ptr = deq_ptr; ptr != enq_ptr;
	     ptr = (ptr + 1) % rh->ibi_status_entries) {
		u32 ibi_status, *ring_ibi_status;
		unsigned int chunks;

		ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
		ibi_status = *ring_ibi_status;
		DBG("status = %#x", ibi_status);

		if (ibi_status_error) {
			/* we no longer care */
		} else if (ibi_status & IBI_ERROR) {
			ibi_status_error = ibi_status;
		} else if (ibi_addr == -1) {
			ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
		} else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
			/* the address changed unexpectedly */
			ibi_status_error = ibi_status;
		}

		chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
		ibi_chunks += chunks;
		if (!(ibi_status & IBI_LAST_STATUS)) {
			ibi_size += chunks * rh->ibi_chunk_sz;
		} else {
			ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
			last_ptr = ptr;
			break;
		}
	}

	/* validate what we've got */

	if (last_ptr == -1) {
		/* this IBI sequence is not yet complete */
		DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
		return;
	}
	deq_ptr = last_ptr + 1;
	deq_ptr %= rh->ibi_status_entries;

	if (ibi_status_error) {
		dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
		goto done;
	}

	/* determine who this is for */
	dev = i3c_hci_addr_to_dev(hci, ibi_addr);
	if (!dev) {
		dev_err(&hci->master.dev,
			"IBI for unknown device %#x\n", ibi_addr);
		goto done;
	}

	dev_data = i3c_dev_get_master_data(dev);
	dev_ibi = dev_data->ibi_data;
	if (ibi_size > dev_ibi->max_len) {
		dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
			ibi_size, dev_ibi->max_len);
		goto done;
	}

	/*
	 * This ring model is not suitable for zero-copy processing of IBIs.
	 * We have the data chunk ring wrap-around to deal with, meaning
	 * that the payload might span multiple chunks beginning at the
	 * end of the ring and wrap to the start of the ring. Furthermore
	 * there is no guarantee that those chunks will be released in order
	 * and in a timely manner by the upper driver. So let's just copy
	 * them to a discrete buffer. In practice they're supposed to be
	 * small anyway.
	 */
	slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
	if (!slot) {
		dev_err(&hci->master.dev, "no free slot for IBI\n");
		goto done;
	}

	/* copy first part of the payload */
	ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
	ring_ibi_data = rh->ibi_data + ibi_data_offset;
	ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
	first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
			* rh->ibi_chunk_sz;
	if (first_part > ibi_size)
		first_part = ibi_size;
	dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
				first_part, DMA_FROM_DEVICE);
	memcpy(slot->data, ring_ibi_data, first_part);

	/* copy second part if any */
	if (ibi_size > first_part) {
		/* we wrap back to the start and copy remaining data */
		ring_ibi_data = rh->ibi_data;
		ring_ibi_data_dma = rh->ibi_data_dma;
		dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
					ibi_size - first_part, DMA_FROM_DEVICE);
		memcpy(slot->data + first_part, ring_ibi_data,
		       ibi_size - first_part);
	}

711 slot
->len
= ibi_size
;
712 i3c_master_queue_ibi(dev
, slot
);
715 /* take care to update the ibi dequeue pointer atomically */
716 spin_lock(&rh
->lock
);
717 op1_val
= rh_reg_read(RING_OPERATION1
);
718 op1_val
&= ~RING_OP1_IBI_DEQ_PTR
;
719 op1_val
|= FIELD_PREP(RING_OP1_IBI_DEQ_PTR
, deq_ptr
);
720 rh_reg_write(RING_OPERATION1
, op1_val
);
721 spin_unlock(&rh
->lock
);
723 /* update the chunk pointer */
724 rh
->ibi_chunk_ptr
+= ibi_chunks
;
725 rh
->ibi_chunk_ptr
%= rh
->ibi_chunks_total
;
727 /* and tell the hardware about freed chunks */
728 rh_reg_write(CHUNK_CONTROL
, rh_reg_read(CHUNK_CONTROL
) + ibi_chunks
);
static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
{
	struct hci_rings_data *rings = hci->io_data;
	unsigned int i;
	bool handled = false;

	for (i = 0; mask && i < 8; i++) {
		struct hci_rh_data *rh;
		u32 status;

		if (!(mask & BIT(i)))
			continue;
		mask &= ~BIT(i);

		rh = &rings->headers[i];
		status = rh_reg_read(INTR_STATUS);
		DBG("rh%d status: %#x", i, status);
		if (!status)
			continue;
		rh_reg_write(INTR_STATUS, status);

		if (status & INTR_IBI_READY)
			hci_dma_process_ibi(hci, rh);
		if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
			hci_dma_xfer_done(hci, rh);
		if (status & INTR_RING_OP)
			complete(&rh->op_done);

		if (status & INTR_TRANSFER_ABORT)
			dev_notice_ratelimited(&hci->master.dev,
					       "ring %d: Transfer Aborted\n", i);
		if (status & INTR_WARN_INS_STOP_MODE)
			dev_warn_ratelimited(&hci->master.dev,
					     "ring %d: Inserted Stop on Mode Change\n", i);
		if (status & INTR_IBI_RING_FULL)
			dev_err_ratelimited(&hci->master.dev,
					    "ring %d: IBI Ring Full Condition\n", i);

		handled = true;
	}

	return handled;
}

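/*
 * hci_io_ops vtable through which the HCI core drives the ring (DMA)
 * transfer mode implemented above.
 */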
const struct hci_io_ops mipi_i3c_hci_dma = {
	.init			= hci_dma_init,
	.cleanup		= hci_dma_cleanup,
	.queue_xfer		= hci_dma_queue_xfer,
	.dequeue_xfer		= hci_dma_dequeue_xfer,
	.irq_handler		= hci_dma_irq_handler,
	.request_ibi		= hci_dma_request_ibi,
	.free_ibi		= hci_dma_free_ibi,
	.recycle_ibi_slot	= hci_dma_recycle_ibi_slot,
};