// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * Core driver code with main interface to the I3C subsystem.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "hci.h"
#include "ext_caps.h"
#include "cmd.h"
#include "dat.h"
#include "dct.h"

/*
 * Host Controller Capabilities and Operation Registers
 */

#define HCI_VERSION			0x00	/* HCI Version (in BCD) */

#define HC_CONTROL			0x04
#define HC_CONTROL_BUS_ENABLE		BIT(31)
#define HC_CONTROL_RESUME		BIT(30)
#define HC_CONTROL_ABORT		BIT(29)
#define HC_CONTROL_HALT_ON_CMD_TIMEOUT	BIT(12)
#define HC_CONTROL_HOT_JOIN_CTRL	BIT(8)	/* Hot-Join ACK/NACK Control */
#define HC_CONTROL_I2C_TARGET_PRESENT	BIT(7)
#define HC_CONTROL_PIO_MODE		BIT(6)	/* DMA/PIO Mode Selector */
#define HC_CONTROL_DATA_BIG_ENDIAN	BIT(4)
#define HC_CONTROL_IBA_INCLUDE		BIT(0)	/* Include I3C Broadcast Address */

#define MASTER_DEVICE_ADDR		0x08	/* Master Device Address */
#define MASTER_DYNAMIC_ADDR_VALID	BIT(31)	/* Dynamic Address is Valid */
#define MASTER_DYNAMIC_ADDR(v)		FIELD_PREP(GENMASK(22, 16), v)

#define HC_CAPABILITIES			0x0c
#define HC_CAP_SG_DC_EN			BIT(30)
#define HC_CAP_SG_IBI_EN		BIT(29)
#define HC_CAP_SG_CR_EN			BIT(28)
#define HC_CAP_MAX_DATA_LENGTH		GENMASK(24, 22)
#define HC_CAP_CMD_SIZE			GENMASK(21, 20)
#define HC_CAP_DIRECT_COMMANDS_EN	BIT(18)
#define HC_CAP_MULTI_LANE_EN		BIT(15)
#define HC_CAP_CMD_CCC_DEFBYTE		BIT(10)
#define HC_CAP_HDR_BT_EN		BIT(8)
#define HC_CAP_HDR_TS_EN		BIT(7)
#define HC_CAP_HDR_DDR_EN		BIT(6)
#define HC_CAP_NON_CURRENT_MASTER_CAP	BIT(5)	/* master handoff capable */
#define HC_CAP_DATA_BYTE_CFG_EN		BIT(4)	/* endian selection possible */
#define HC_CAP_AUTO_COMMAND		BIT(3)
#define HC_CAP_COMBO_COMMAND		BIT(2)

#define RESET_CONTROL			0x10
#define BUS_RESET			BIT(31)
#define BUS_RESET_TYPE			GENMASK(30, 29)
#define IBI_QUEUE_RST			BIT(5)
#define RX_FIFO_RST			BIT(4)
#define TX_FIFO_RST			BIT(3)
#define RESP_QUEUE_RST			BIT(2)
#define CMD_QUEUE_RST			BIT(1)
#define SOFT_RST			BIT(0)	/* Core Reset */

#define PRESENT_STATE			0x14
#define STATE_CURRENT_MASTER		BIT(2)

#define INTR_STATUS			0x20
#define INTR_STATUS_ENABLE		0x24
#define INTR_SIGNAL_ENABLE		0x28
#define INTR_FORCE			0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT	BIT(12)	/* Cmd Sequence Underflow */
#define INTR_HC_RESET_CANCEL		BIT(11)	/* HC Cancelled Reset */
#define INTR_HC_INTERNAL_ERR		BIT(10)	/* HC Internal Error */

#define DAT_SECTION			0x30	/* Device Address Table */
#define DAT_ENTRY_SIZE			GENMASK(31, 28)
#define DAT_TABLE_SIZE			GENMASK(18, 12)
#define DAT_TABLE_OFFSET		GENMASK(11, 0)

#define DCT_SECTION			0x34	/* Device Characteristics Table */
#define DCT_ENTRY_SIZE			GENMASK(31, 28)
#define DCT_TABLE_INDEX			GENMASK(23, 19)
#define DCT_TABLE_SIZE			GENMASK(18, 12)
#define DCT_TABLE_OFFSET		GENMASK(11, 0)

#define RING_HEADERS_SECTION		0x38
#define RING_HEADERS_OFFSET		GENMASK(15, 0)

#define PIO_SECTION			0x3c
#define PIO_REGS_OFFSET			GENMASK(15, 0)	/* PIO Offset */

#define EXT_CAPS_SECTION		0x40
#define EXT_CAPS_OFFSET			GENMASK(15, 0)

#define IBI_NOTIFY_CTRL			0x58	/* IBI Notify Control */
#define IBI_NOTIFY_SIR_REJECTED		BIT(3)	/* Rejected Target Interrupt Request */
#define IBI_NOTIFY_MR_REJECTED		BIT(1)	/* Rejected Master Request Control */
#define IBI_NOTIFY_HJ_REJECTED		BIT(0)	/* Rejected Hot-Join Control */

#define DEV_CTX_BASE_LO			0x60
#define DEV_CTX_BASE_HI			0x64

static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
{
	return container_of(m, struct i3c_hci, master);
}

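/*
 * Bus initialization, as called from the I3C core: set up the DAT when the
 * v1 command descriptor model is in use, reserve a dynamic address for the
 * controller itself and program it into MASTER_DEVICE_ADDR, report it to the
 * core, initialize the selected IO backend (PIO or DMA), then enable the bus.
 */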
static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_device_info info;
	int ret;

	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.init(hci);
		if (ret)
			return ret;
	}

	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;
	reg_write(MASTER_DEVICE_ADDR,
		  MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
	memset(&info, 0, sizeof(info));
	info.dyn_addr = ret;
	ret = i3c_master_set_info(m, &info);
	if (ret)
		return ret;

	ret = hci->io->init(hci);
	if (ret)
		return ret;

	/* Set RESP_BUF_THLD to 0(n) to get 1(n+1) response */
	if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
		amd_set_resp_buf_thld(hci);

	reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));

	return 0;
}

static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct platform_device *pdev = to_platform_device(m->dev.parent);

	reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
	synchronize_irq(platform_get_irq(pdev, 0));
	hci->io->cleanup(hci);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.cleanup(hci);
}

void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
	reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}

/* located here rather than pio.c because needed bits are in core reg space */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
	reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}

/* located here rather than dct.c because needed bits are in core reg space */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
	reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}

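/*
 * CCC command execution: one transfer is built per destination. Per the HCI
 * command descriptor format, every descriptor requests a response on
 * completion (CMD_0_ROC) and only the last one terminates the transaction
 * (CMD_0_TOC). With HCI_QUIRK_RAW_CCC the CCC is sent in raw form and direct
 * CCCs get an extra broadcast-address transfer prefixed to carry the CCC.
 */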
static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
				struct i3c_ccc_cmd *ccc)
{
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
	bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
	unsigned int nxfers = ccc->ndests + prefixed;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
	    ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	if (prefixed) {
		xfer->data = NULL;
		xfer->data_len = 0;
		xfer->rnw = false;
		hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
				   ccc->id, true);
		xfer++;
	}

	for (i = 0; i < nxfers - prefixed; i++) {
		xfer[i].data = ccc->dests[i].payload.data;
		xfer[i].data_len = ccc->dests[i].payload.len;
		xfer[i].rnw = ccc->rnw;
		ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
					 ccc->id, raw);
		if (ret)
			goto out;
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
	}
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	if (prefixed)
		xfer--;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = prefixed; i < nxfers; i++) {
		if (ccc->rnw)
			ccc->dests[i - prefixed].payload.len =
				RESP_DATA_LENGTH(xfer[i].response);
		switch (RESP_STATUS(xfer[i].response)) {
		case RESP_SUCCESS:
			break;
		case RESP_ERR_ADDR_HEADER:
		case RESP_ERR_NACK:
			ccc->err = I3C_ERROR_M2;
			fallthrough;
		default:
			ret = -EIO;
			goto out;
		}
	}

	if (ccc->rnw)
		DBG("got: %*ph",
		    ccc->dests[0].payload.len, ccc->dests[0].payload.data);

out:
	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_daa(struct i3c_master_controller *m)
{
	struct i3c_hci *hci = to_i3c_hci(m);

	return hci->cmd->perform_daa(hci);
}

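/*
 * The DMA backend cannot map vmalloc'd memory for transfers. When such a
 * buffer is passed in, the two helpers below stage the transfer through a
 * kmalloc'd bounce buffer: freshly allocated for reads, a copy of the source
 * data for writes, with read data copied back when the buffer is released.
 */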
static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
				       struct hci_xfer *xfer)
{
	if (hci->io != &mipi_i3c_hci_dma ||
	    xfer->data == NULL || !is_vmalloc_addr(xfer->data))
		return 0;

	if (xfer->rnw)
		xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
	else
		xfer->bounce_buf = kmemdup(xfer->data,
					   xfer->data_len, GFP_KERNEL);

	return xfer->bounce_buf == NULL ? -ENOMEM : 0;
}

static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
				       struct hci_xfer *xfer)
{
	if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
		return;

	if (xfer->rnw)
		memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);

	kfree(xfer->bounce_buf);
}

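/*
 * Private I3C transfers. The per-transfer length limit comes from
 * HC_CAP_MAX_DATA_LENGTH, which encodes a power of two starting at 64 KiB,
 * hence the 1U << (16 + n) computation below.
 */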
static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
			      struct i3c_priv_xfer *i3c_xfers,
			      int nxfers)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned int size_limit;
	int i, last, ret = 0;

	DBG("nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));

	for (i = 0; i < nxfers; i++) {
		xfer[i].data_len = i3c_xfers[i].len;
		ret = -EFBIG;
		if (xfer[i].data_len >= size_limit)
			goto out;
		xfer[i].rnw = i3c_xfers[i].rnw;
		if (i3c_xfers[i].rnw) {
			xfer[i].data = i3c_xfers[i].data.in;
		} else {
			/* silence the const qualifier warning with a cast */
			xfer[i].data = (void *) i3c_xfers[i].data.out;
		}
		hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
		ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
		if (ret)
			goto out;
	}
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (i3c_xfers[i].rnw)
			i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			break;
		}
	}

out:
	for (i = 0; i < nxfers; i++)
		i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
			     const struct i2c_msg *i2c_xfers, int nxfers)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct hci_xfer *xfer;
	DECLARE_COMPLETION_ONSTACK(done);
	int i, last, ret = 0;

	DBG("nxfers = %d", nxfers);

	xfer = hci_alloc_xfer(nxfers);
	if (!xfer)
		return -ENOMEM;

	for (i = 0; i < nxfers; i++) {
		xfer[i].data = i2c_xfers[i].buf;
		xfer[i].data_len = i2c_xfers[i].len;
		xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
		hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
		xfer[i].cmd_desc[0] |= CMD_0_ROC;
		ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
		if (ret)
			goto out;
	}
	last = i - 1;
	xfer[last].cmd_desc[0] |= CMD_0_TOC;
	xfer[last].completion = &done;

	ret = hci->io->queue_xfer(hci, xfer, nxfers);
	if (ret)
		goto out;
	if (!wait_for_completion_timeout(&done, HZ) &&
	    hci->io->dequeue_xfer(hci, xfer, nxfers)) {
		ret = -ETIME;
		goto out;
	}
	for (i = 0; i < nxfers; i++) {
		if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
			ret = -EIO;
			break;
		}
	}

out:
	for (i = 0; i < nxfers; i++)
		i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

	hci_free_xfer(xfer, nxfers);
	return ret;
}

static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
		ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
		if (ret < 0) {
			kfree(dev_data);
			return ret;
		}
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret,
				dev->info.dyn_addr ?: dev->info.static_addr);
		dev_data->dat_idx = ret;
	}
	i3c_dev_set_master_data(dev, dev_data);
	return 0;
}

static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
						     dev->info.dyn_addr);
	return 0;
}

static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	i3c_dev_set_master_data(dev, NULL);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
	kfree(dev_data);
}

static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data;
	int ret;

	if (hci->cmd != &mipi_i3c_hci_cmd_v1)
		return 0;
	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;
	ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
	if (ret < 0) {
		kfree(dev_data);
		return ret;
	}
	mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
	mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
	dev_data->dat_idx = ret;
	i2c_dev_set_master_data(dev, dev_data);
	return 0;
}

static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
{
	struct i3c_master_controller *m = i2c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);

	i2c_dev_set_master_data(dev, NULL);
	if (hci->cmd == &mipi_i3c_hci_cmd_v1)
		mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
	kfree(dev_data);
}

static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
			       const struct i3c_ibi_setup *req)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
	unsigned int dat_idx = dev_data->dat_idx;

	if (req->max_payload_len != 0)
		mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	else
		mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
	return hci->io->request_ibi(hci, dev, req);
}

static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->free_ibi(hci, dev);
}

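/*
 * IBI enable/disable acts in two places: the DAT_0_SIR_REJECT flag in the
 * target's DAT entry (controller-side filtering) and the ENEC/DISEC CCC
 * sent on the bus through the I3C core helpers.
 */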
static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);
	struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

	mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
	return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
				     struct i3c_ibi_slot *slot)
{
	struct i3c_master_controller *m = i3c_dev_get_master(dev);
	struct i3c_hci *hci = to_i3c_hci(m);

	hci->io->recycle_ibi_slot(hci, dev, slot);
}

static const struct i3c_master_controller_ops i3c_hci_ops = {
	.bus_init = i3c_hci_bus_init,
	.bus_cleanup = i3c_hci_bus_cleanup,
	.do_daa = i3c_hci_daa,
	.send_ccc_cmd = i3c_hci_send_ccc_cmd,
	.priv_xfers = i3c_hci_priv_xfers,
	.i2c_xfers = i3c_hci_i2c_xfers,
	.attach_i3c_dev = i3c_hci_attach_i3c_dev,
	.reattach_i3c_dev = i3c_hci_reattach_i3c_dev,
	.detach_i3c_dev = i3c_hci_detach_i3c_dev,
	.attach_i2c_dev = i3c_hci_attach_i2c_dev,
	.detach_i2c_dev = i3c_hci_detach_i2c_dev,
	.request_ibi = i3c_hci_request_ibi,
	.free_ibi = i3c_hci_free_ibi,
	.enable_ibi = i3c_hci_enable_ibi,
	.disable_ibi = i3c_hci_disable_ibi,
	.recycle_ibi_slot = i3c_hci_recycle_ibi_slot,
};

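/*
 * Top-level interrupt handler: acknowledge whatever is set in INTR_STATUS,
 * report reset-cancel and internal-error events, then let the active IO
 * backend (PIO or DMA) service its own interrupt sources.
 */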
static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
	struct i3c_hci *hci = dev_id;
	irqreturn_t result = IRQ_NONE;
	u32 val;

	val = reg_read(INTR_STATUS);
	DBG("INTR_STATUS = %#x", val);

	if (val)
		reg_write(INTR_STATUS, val);

	if (val & INTR_HC_RESET_CANCEL) {
		DBG("cancelled reset");
		val &= ~INTR_HC_RESET_CANCEL;
	}
	if (val & INTR_HC_INTERNAL_ERR) {
		dev_err(&hci->master.dev, "Host Controller Internal Error\n");
		val &= ~INTR_HC_INTERNAL_ERR;
	}

	hci->io->irq_handler(hci);

	if (val)
		dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
	else
		result = IRQ_HANDLED;

	return result;
}

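/*
 * One-time controller discovery and setup at probe time: validate the HCI
 * version, cache HC_CAPABILITIES, locate the DAT/DCT/ring-header/PIO/extended
 * capability register blocks, soft-reset the controller, align data
 * endianness with the CPU, pick the command descriptor format, and select
 * DMA or PIO operation.
 */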
static int i3c_hci_init(struct i3c_hci *hci)
{
	bool size_in_dwords, mode_selector;
	u32 regval, offset;
	int ret;

	/* Validate HCI hardware version */
	regval = reg_read(HCI_VERSION);
	hci->version_major = (regval >> 8) & 0xf;
	hci->version_minor = (regval >> 4) & 0xf;
	hci->revision = regval & 0xf;
	dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
		   hci->version_major, hci->version_minor, hci->revision);

	switch (regval & ~0xf) {
	case 0x100:	/* version 1.0 */
	case 0x110:	/* version 1.1 */
	case 0x200:	/* version 2.0 */
		break;
	default:
		dev_err(&hci->master.dev, "unsupported HCI version\n");
		return -EPROTONOSUPPORT;
	}

	hci->caps = reg_read(HC_CAPABILITIES);
	DBG("caps = %#x", hci->caps);

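	/*
	 * For HCI versions before 1.1 the DAT/DCT sizes below are apparently
	 * reported in DWORDs rather than in entries, hence the conversion
	 * applied further down when size_in_dwords is true.
	 */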
	size_in_dwords = hci->version_major < 1 ||
			 (hci->version_major == 1 && hci->version_minor < 1);

	regval = reg_read(DAT_SECTION);
	offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
	hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
	hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
	if (size_in_dwords)
		hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
	dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
		 hci->DAT_entries, hci->DAT_entry_size, offset);

	regval = reg_read(DCT_SECTION);
	offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
	hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
	hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
	hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
	if (size_in_dwords)
		hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
	dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
		 hci->DCT_entries, hci->DCT_entry_size, offset);

	regval = reg_read(RING_HEADERS_SECTION);
	offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
	hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);

	regval = reg_read(PIO_SECTION);
	offset = FIELD_GET(PIO_REGS_OFFSET, regval);
	hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);

	regval = reg_read(EXT_CAPS_SECTION);
	offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
	hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
	dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);

	ret = i3c_hci_parse_ext_caps(hci);
	if (ret)
		return ret;

	/*
	 * Now let's reset the hardware.
	 * SOFT_RST must be clear before we write to it.
	 * Then we must wait until it clears again.
	 */
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;
	reg_write(RESET_CONTROL, SOFT_RST);
	ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
				 !(regval & SOFT_RST), 1, 10000);
	if (ret)
		return -ENXIO;

	/* Disable all interrupts and allow all signal updates */
	reg_write(INTR_SIGNAL_ENABLE, 0x0);
	reg_write(INTR_STATUS_ENABLE, 0xffffffff);

	/* Make sure our data ordering fits the host's */
	regval = reg_read(HC_CONTROL);
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
			regval |= HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			regval = reg_read(HC_CONTROL);
			if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
				dev_err(&hci->master.dev, "cannot set BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	} else {
		if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
			regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
			reg_write(HC_CONTROL, regval);
			regval = reg_read(HC_CONTROL);
			if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
				dev_err(&hci->master.dev, "cannot clear BE mode\n");
				return -EOPNOTSUPP;
			}
		}
	}

	/* Select our command descriptor model */
	switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
	case 0:
		hci->cmd = &mipi_i3c_hci_cmd_v1;
		break;
	case 1:
		hci->cmd = &mipi_i3c_hci_cmd_v2;
		break;
	default:
		dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
		return -EINVAL;
	}

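	/*
	 * The DMA/PIO mode selector in HC_CONTROL is presumably only
	 * implemented from HCI v1.1 onwards, so the "stuck mode" read-back
	 * checks below are applied only when mode_selector is true.
	 */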
	mode_selector = hci->version_major > 1 ||
			(hci->version_major == 1 && hci->version_minor > 0);

	/* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */
	if (hci->quirks & HCI_QUIRK_PIO_MODE)
		hci->RHS_regs = NULL;

	/* Try activating DMA operations first */
	if (hci->RHS_regs) {
		reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "PIO mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_dma;
			dev_info(&hci->master.dev, "Using DMA\n");
		}
	}

	/* If no DMA, try PIO */
	if (!hci->io && hci->PIO_regs) {
		reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
		if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
			dev_err(&hci->master.dev, "DMA mode is stuck\n");
			ret = -EIO;
		} else {
			hci->io = &mipi_i3c_hci_pio;
			dev_info(&hci->master.dev, "Using PIO\n");
		}
	}

	if (!hci->io) {
		dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
		if (!ret)
			ret = -EINVAL;
		return ret;
	}

	/* Configure OD and PP timings for AMD platforms */
	if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
		amd_set_od_pp_timing(hci);

	return 0;
}

static int i3c_hci_probe(struct platform_device *pdev)
{
	struct i3c_hci *hci;
	int irq, ret;

	hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
	if (!hci)
		return -ENOMEM;
	hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hci->base_regs))
		return PTR_ERR(hci->base_regs);

	platform_set_drvdata(pdev, hci);
	/* temporary for dev_printk's, to be replaced in i3c_master_register */
	hci->master.dev.init_name = dev_name(&pdev->dev);

	hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);

	ret = i3c_hci_init(hci);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
			       0, NULL, hci);
	if (ret)
		return ret;

	ret = i3c_master_register(&hci->master, &pdev->dev,
				  &i3c_hci_ops, false);
	if (ret)
		return ret;

	return 0;
}

static void i3c_hci_remove(struct platform_device *pdev)
{
	struct i3c_hci *hci = platform_get_drvdata(pdev);

	i3c_master_unregister(&hci->master);
}

static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
	{ .compatible = "mipi-i3c-hci", },
	{},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);

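/*
 * The AMDI5017 ACPI ID (AMD platforms) carries quirk flags as match data:
 * force PIO mode, apply the AMD open-drain/push-pull timings and adjust the
 * response buffer threshold, all handled in i3c_hci_init() and
 * i3c_hci_bus_init() above.
 */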
static const struct acpi_device_id i3c_hci_acpi_match[] = {
	{ "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
	{}
};
MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);

static struct platform_driver i3c_hci_driver = {
	.probe = i3c_hci_probe,
	.remove = i3c_hci_remove,
	.driver = {
		.name = "mipi-i3c-hci",
		.of_match_table = of_match_ptr(i3c_hci_of_match),
		.acpi_match_table = i3c_hci_acpi_match,
	},
};
module_platform_driver(i3c_hci_driver);
MODULE_ALIAS("platform:mipi-i3c-hci");

MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");