// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <npitre@baylibre.com>
 *
 * I3C HCI v1.0/v1.1 Command Descriptor Handling
 */
#include <linux/bitfield.h>
#include <linux/i3c/master.h>
/*
 * Address Assignment Command
 */

#define CMD_0_ATTR_A		FIELD_PREP(CMD_0_ATTR, 0x2)

#define CMD_A0_TOC			   W0_BIT_(31)
#define CMD_A0_ROC			   W0_BIT_(30)
#define CMD_A0_DEV_COUNT(v)	FIELD_PREP(W0_MASK(29, 26), v)
#define CMD_A0_DEV_INDEX(v)	FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_A0_CMD(v)		FIELD_PREP(W0_MASK(14,  7), v)
#define CMD_A0_TID(v)		FIELD_PREP(W0_MASK( 6,  3), v)
/*
 * Immediate Data Transfer Command
 */

#define CMD_0_ATTR_I		FIELD_PREP(CMD_0_ATTR, 0x1)

#define CMD_I1_DATA_BYTE_4(v)	FIELD_PREP(W1_MASK(63, 56), v)
#define CMD_I1_DATA_BYTE_3(v)	FIELD_PREP(W1_MASK(55, 48), v)
#define CMD_I1_DATA_BYTE_2(v)	FIELD_PREP(W1_MASK(47, 40), v)
#define CMD_I1_DATA_BYTE_1(v)	FIELD_PREP(W1_MASK(39, 32), v)
/* DEF_BYTE shares the byte-1 position: used when a defining byte is sent */
#define CMD_I1_DEF_BYTE(v)	FIELD_PREP(W1_MASK(39, 32), v)
#define CMD_I0_TOC			   W0_BIT_(31)
#define CMD_I0_ROC			   W0_BIT_(30)
#define CMD_I0_RNW			   W0_BIT_(29)
#define CMD_I0_MODE(v)		FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_I0_DTT(v)		FIELD_PREP(W0_MASK(25, 23), v)
#define CMD_I0_DEV_INDEX(v)	FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_I0_CP			   W0_BIT_(15)
#define CMD_I0_CMD(v)		FIELD_PREP(W0_MASK(14,  7), v)
#define CMD_I0_TID(v)		FIELD_PREP(W0_MASK( 6,  3), v)
/*
 * Regular Data Transfer Command
 */

#define CMD_0_ATTR_R		FIELD_PREP(CMD_0_ATTR, 0x0)

#define CMD_R1_DATA_LENGTH(v)	FIELD_PREP(W1_MASK(63, 48), v)
#define CMD_R1_DEF_BYTE(v)	FIELD_PREP(W1_MASK(39, 32), v)
#define CMD_R0_TOC			   W0_BIT_(31)
#define CMD_R0_ROC			   W0_BIT_(30)
#define CMD_R0_RNW			   W0_BIT_(29)
#define CMD_R0_MODE(v)		FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_R0_DBP			   W0_BIT_(25)
#define CMD_R0_DEV_INDEX(v)	FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_R0_CP			   W0_BIT_(15)
#define CMD_R0_CMD(v)		FIELD_PREP(W0_MASK(14,  7), v)
#define CMD_R0_TID(v)		FIELD_PREP(W0_MASK( 6,  3), v)
/*
 * Combo Transfer (Write + Write/Read) Command
 */

#define CMD_0_ATTR_C		FIELD_PREP(CMD_0_ATTR, 0x3)

#define CMD_C1_DATA_LENGTH(v)	FIELD_PREP(W1_MASK(63, 48), v)
#define CMD_C1_OFFSET(v)	FIELD_PREP(W1_MASK(47, 32), v)
#define CMD_C0_TOC			   W0_BIT_(31)
#define CMD_C0_ROC			   W0_BIT_(30)
#define CMD_C0_RNW			   W0_BIT_(29)
#define CMD_C0_MODE(v)		FIELD_PREP(W0_MASK(28, 26), v)
#define CMD_C0_16_BIT_SUBOFFSET		   W0_BIT_(25)
#define CMD_C0_FIRST_PHASE_MODE		   W0_BIT_(24)
#define CMD_C0_DATA_LENGTH_POSITION(v)	FIELD_PREP(W0_MASK(23, 22), v)
#define CMD_C0_DEV_INDEX(v)	FIELD_PREP(W0_MASK(20, 16), v)
#define CMD_C0_CP			   W0_BIT_(15)
#define CMD_C0_CMD(v)		FIELD_PREP(W0_MASK(14,  7), v)
#define CMD_C0_TID(v)		FIELD_PREP(W0_MASK( 6,  3), v)
/*
 * Internal Control Command
 */

#define CMD_0_ATTR_M		FIELD_PREP(CMD_0_ATTR, 0x7)

#define CMD_M1_VENDOR_SPECIFIC		   W1_MASK(63, 32)
#define CMD_M0_MIPI_RESERVED		   W0_MASK(31, 12)
#define CMD_M0_MIPI_CMD			   W0_MASK(11,  8)
#define CMD_M0_VENDOR_INFO_PRESENT	   W0_BIT_( 7)
#define CMD_M0_TID(v)		FIELD_PREP(W0_MASK( 6,  3), v)
/* Data Transfer Speed and Mode */
enum hci_cmd_mode {
	/*
	 * I3C and I2C mode encodings overlap: the same 3-bit MODE field is
	 * interpreted according to the addressed device's transfer type.
	 * SDR0..SDR4 values restored per the functions below that return
	 * them — NOTE(review): confirm against the MIPI HCI spec table.
	 */
	MODE_I3C_SDR0		= 0x0,
	MODE_I3C_SDR1		= 0x1,
	MODE_I3C_SDR2		= 0x2,
	MODE_I3C_SDR3		= 0x3,
	MODE_I3C_SDR4		= 0x4,
	MODE_I3C_HDR_TSx	= 0x5,
	MODE_I3C_HDR_DDR	= 0x6,
	MODE_I3C_HDR_BT		= 0x7,
	MODE_I3C_Fm_FmP		= 0x8,
	MODE_I2C_Fm		= 0x0,
	MODE_I2C_FmP		= 0x1,
	MODE_I2C_UD1		= 0x2,
	MODE_I2C_UD2		= 0x3,
	MODE_I2C_UD3		= 0x4,
};
122 static enum hci_cmd_mode
get_i3c_mode(struct i3c_hci
*hci
)
124 struct i3c_bus
*bus
= i3c_master_get_bus(&hci
->master
);
126 if (bus
->scl_rate
.i3c
>= 12500000)
127 return MODE_I3C_SDR0
;
128 if (bus
->scl_rate
.i3c
> 8000000)
129 return MODE_I3C_SDR1
;
130 if (bus
->scl_rate
.i3c
> 6000000)
131 return MODE_I3C_SDR2
;
132 if (bus
->scl_rate
.i3c
> 4000000)
133 return MODE_I3C_SDR3
;
134 if (bus
->scl_rate
.i3c
> 2000000)
135 return MODE_I3C_SDR4
;
136 return MODE_I3C_Fm_FmP
;
139 static enum hci_cmd_mode
get_i2c_mode(struct i3c_hci
*hci
)
141 struct i3c_bus
*bus
= i3c_master_get_bus(&hci
->master
);
143 if (bus
->scl_rate
.i2c
>= 1000000)
148 static void fill_data_bytes(struct hci_xfer
*xfer
, u8
*data
,
149 unsigned int data_len
)
151 xfer
->cmd_desc
[1] = 0;
154 xfer
->cmd_desc
[1] |= CMD_I1_DATA_BYTE_4(data
[3]);
157 xfer
->cmd_desc
[1] |= CMD_I1_DATA_BYTE_3(data
[2]);
160 xfer
->cmd_desc
[1] |= CMD_I1_DATA_BYTE_2(data
[1]);
163 xfer
->cmd_desc
[1] |= CMD_I1_DATA_BYTE_1(data
[0]);
168 /* we consumed all the data with the cmd descriptor */
172 static int hci_cmd_v1_prep_ccc(struct i3c_hci
*hci
,
173 struct hci_xfer
*xfer
,
174 u8 ccc_addr
, u8 ccc_cmd
, bool raw
)
176 unsigned int dat_idx
= 0;
177 enum hci_cmd_mode mode
= get_i3c_mode(hci
);
178 u8
*data
= xfer
->data
;
179 unsigned int data_len
= xfer
->data_len
;
180 bool rnw
= xfer
->rnw
;
183 /* this should never happen */
187 if (ccc_addr
!= I3C_BROADCAST_ADDR
) {
188 ret
= mipi_i3c_hci_dat_v1
.get_index(hci
, ccc_addr
);
194 xfer
->cmd_tid
= hci_get_tid();
196 if (!rnw
&& data_len
<= 4) {
197 /* we use an Immediate Data Transfer Command */
200 CMD_I0_TID(xfer
->cmd_tid
) |
201 CMD_I0_CMD(ccc_cmd
) | CMD_I0_CP
|
202 CMD_I0_DEV_INDEX(dat_idx
) |
203 CMD_I0_DTT(data_len
) |
205 fill_data_bytes(xfer
, data
, data_len
);
207 /* we use a Regular Data Transfer Command */
210 CMD_R0_TID(xfer
->cmd_tid
) |
211 CMD_R0_CMD(ccc_cmd
) | CMD_R0_CP
|
212 CMD_R0_DEV_INDEX(dat_idx
) |
214 (rnw
? CMD_R0_RNW
: 0);
216 CMD_R1_DATA_LENGTH(data_len
);
222 static void hci_cmd_v1_prep_i3c_xfer(struct i3c_hci
*hci
,
223 struct i3c_dev_desc
*dev
,
224 struct hci_xfer
*xfer
)
226 struct i3c_hci_dev_data
*dev_data
= i3c_dev_get_master_data(dev
);
227 unsigned int dat_idx
= dev_data
->dat_idx
;
228 enum hci_cmd_mode mode
= get_i3c_mode(hci
);
229 u8
*data
= xfer
->data
;
230 unsigned int data_len
= xfer
->data_len
;
231 bool rnw
= xfer
->rnw
;
233 xfer
->cmd_tid
= hci_get_tid();
235 if (!rnw
&& data_len
<= 4) {
236 /* we use an Immediate Data Transfer Command */
239 CMD_I0_TID(xfer
->cmd_tid
) |
240 CMD_I0_DEV_INDEX(dat_idx
) |
241 CMD_I0_DTT(data_len
) |
243 fill_data_bytes(xfer
, data
, data_len
);
245 /* we use a Regular Data Transfer Command */
248 CMD_R0_TID(xfer
->cmd_tid
) |
249 CMD_R0_DEV_INDEX(dat_idx
) |
251 (rnw
? CMD_R0_RNW
: 0);
253 CMD_R1_DATA_LENGTH(data_len
);
257 static void hci_cmd_v1_prep_i2c_xfer(struct i3c_hci
*hci
,
258 struct i2c_dev_desc
*dev
,
259 struct hci_xfer
*xfer
)
261 struct i3c_hci_dev_data
*dev_data
= i2c_dev_get_master_data(dev
);
262 unsigned int dat_idx
= dev_data
->dat_idx
;
263 enum hci_cmd_mode mode
= get_i2c_mode(hci
);
264 u8
*data
= xfer
->data
;
265 unsigned int data_len
= xfer
->data_len
;
266 bool rnw
= xfer
->rnw
;
268 xfer
->cmd_tid
= hci_get_tid();
270 if (!rnw
&& data_len
<= 4) {
271 /* we use an Immediate Data Transfer Command */
274 CMD_I0_TID(xfer
->cmd_tid
) |
275 CMD_I0_DEV_INDEX(dat_idx
) |
276 CMD_I0_DTT(data_len
) |
278 fill_data_bytes(xfer
, data
, data_len
);
280 /* we use a Regular Data Transfer Command */
283 CMD_R0_TID(xfer
->cmd_tid
) |
284 CMD_R0_DEV_INDEX(dat_idx
) |
286 (rnw
? CMD_R0_RNW
: 0);
288 CMD_R1_DATA_LENGTH(data_len
);
292 static int hci_cmd_v1_daa(struct i3c_hci
*hci
)
294 struct hci_xfer
*xfer
;
295 int ret
, dat_idx
= -1;
298 unsigned int dcr
, bcr
;
299 DECLARE_COMPLETION_ONSTACK(done
);
301 xfer
= hci_alloc_xfer(2);
306 * Simple for now: we allocate a temporary DAT entry, do a single
307 * DAA, register the device which will allocate its own DAT entry
308 * via the core callback, then free the temporary DAT entry.
309 * Loop until there is no more devices to assign an address to.
310 * Yes, there is room for improvements.
313 ret
= mipi_i3c_hci_dat_v1
.alloc_entry(hci
);
317 ret
= i3c_master_get_free_addr(&hci
->master
, next_addr
);
322 DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr
, dat_idx
);
323 mipi_i3c_hci_dat_v1
.set_dynamic_addr(hci
, dat_idx
, next_addr
);
324 mipi_i3c_hci_dct_index_reset(hci
);
326 xfer
->cmd_tid
= hci_get_tid();
329 CMD_A0_TID(xfer
->cmd_tid
) |
330 CMD_A0_CMD(I3C_CCC_ENTDAA
) |
331 CMD_A0_DEV_INDEX(dat_idx
) |
332 CMD_A0_DEV_COUNT(1) |
333 CMD_A0_ROC
| CMD_A0_TOC
;
334 xfer
->cmd_desc
[1] = 0;
335 hci
->io
->queue_xfer(hci
, xfer
, 1);
336 if (!wait_for_completion_timeout(&done
, HZ
) &&
337 hci
->io
->dequeue_xfer(hci
, xfer
, 1)) {
341 if (RESP_STATUS(xfer
[0].response
) == RESP_ERR_NACK
&&
342 RESP_STATUS(xfer
[0].response
) == 1) {
343 ret
= 0; /* no more devices to be assigned */
346 if (RESP_STATUS(xfer
[0].response
) != RESP_SUCCESS
) {
351 i3c_hci_dct_get_val(hci
, 0, &pid
, &dcr
, &bcr
);
352 DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
353 next_addr
, pid
, dcr
, bcr
);
355 mipi_i3c_hci_dat_v1
.free_entry(hci
, dat_idx
);
359 * TODO: Extend the subsystem layer to allow for registering
360 * new device and provide BCR/DCR/PID at the same time.
362 ret
= i3c_master_add_i3c_dev_locked(&hci
->master
, next_addr
);
368 mipi_i3c_hci_dat_v1
.free_entry(hci
, dat_idx
);
369 hci_free_xfer(xfer
, 1);
373 const struct hci_cmd_ops mipi_i3c_hci_cmd_v1
= {
374 .prep_ccc
= hci_cmd_v1_prep_ccc
,
375 .prep_i3c_xfer
= hci_cmd_v1_prep_i3c_xfer
,
376 .prep_i2c_xfer
= hci_cmd_v1_prep_i2c_xfer
,
377 .perform_daa
= hci_cmd_v1_daa
,