1 // SPDX-License-Identifier: GPL-2.0
3 // Register map access API - SPI AVMM support
5 // Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
7 #include <linux/module.h>
8 #include <linux/regmap.h>
9 #include <linux/spi/spi.h>
10 #include <linux/swab.h>
/*
 * This driver implements the regmap operations for a generic SPI
 * master to access the registers of the spi slave chip which has an
 * Avalon bus in it.
 *
 * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
 * in the spi slave chip. The IP acts as a bridge to convert encoded streams of
 * bytes from the host to the internal register read/write on Avalon bus. In
 * order to issue register access requests to the slave chip, the host should
 * send formatted bytes that conform to the transfer protocol.
 * The transfer protocol contains 3 layers: transaction layer, packet layer
 * and physical layer.
 *
 * Reference Documents could be found at:
 * https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html
 *
 * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
 * introduction to the protocol.
 *
 * Chapter "Avalon Packets to Transactions Converter Core" describes
 * the transaction layer.
 *
 * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
 * describes the packet layer.
 *
 * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
 * physical layer.
 *
 * When host issues a regmap read/write, the driver will transform the request
 * to byte stream layer by layer. It formats the register addr, value and
 * length to the transaction layer request, then converts the request to packet
 * layer bytes stream and then to physical layer bytes stream. Finally the
 * driver sends the formatted byte stream over SPI bus to the slave chip.
 *
 * The spi-avmm IP on the slave chip decodes the byte stream and initiates
 * register read/write on its internal Avalon bus, and then encodes the
 * response to byte stream and sends back to host.
 *
 * The driver receives the byte stream, reverses the 3 layers transformation,
 * and finally gets the response value (read out data for register read,
 * successful written size for register write).
 */
/* special chars of the packet layer */
#define PKT_SOP			0x7a
#define PKT_EOP			0x7b
#define PKT_CHANNEL		0x7c
#define PKT_ESC			0x7d

/* special chars of the physical layer */
#define PHY_IDLE		0x4a
#define PHY_ESC			0x4d

/* transaction layer transaction codes */
#define TRANS_CODE_WRITE	0x0
#define TRANS_CODE_SEQ_WRITE	0x4
#define TRANS_CODE_READ		0x10
#define TRANS_CODE_SEQ_READ	0x14
#define TRANS_CODE_NO_TRANS	0x7f

#define SPI_AVMM_XFER_TIMEOUT	(msecs_to_jiffies(200))

/* slave's register addr is 32 bits */
#define SPI_AVMM_REG_SIZE		4UL
/* slave's register value is 32 bits */
#define SPI_AVMM_VAL_SIZE		4UL

/*
 * max rx size could be larger. But considering the buffer consuming,
 * it is proper that we limit 1KB xfer at max.
 */
#define MAX_READ_CNT		256UL
#define MAX_WRITE_CNT		1UL
84 struct trans_req_header
{
91 struct trans_resp_header
{
97 #define TRANS_REQ_HD_SIZE (sizeof(struct trans_req_header))
98 #define TRANS_RESP_HD_SIZE (sizeof(struct trans_resp_header))
101 * In transaction layer,
102 * the write request format is: Transaction request header + data
103 * the read request format is: Transaction request header
104 * the write response format is: Transaction response header
105 * the read response format is: pure data, no Transaction response header
107 #define TRANS_WR_TX_SIZE(n) (TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
108 #define TRANS_RD_TX_SIZE TRANS_REQ_HD_SIZE
109 #define TRANS_TX_MAX TRANS_WR_TX_SIZE(MAX_WRITE_CNT)
111 #define TRANS_RD_RX_SIZE(n) (SPI_AVMM_VAL_SIZE * (n))
112 #define TRANS_WR_RX_SIZE TRANS_RESP_HD_SIZE
113 #define TRANS_RX_MAX TRANS_RD_RX_SIZE(MAX_READ_CNT)
115 /* tx & rx share one transaction layer buffer */
116 #define TRANS_BUF_SIZE ((TRANS_TX_MAX > TRANS_RX_MAX) ? \
117 TRANS_TX_MAX : TRANS_RX_MAX)
/*
 * In tx phase, the host prepares all the phy layer bytes of a request in the
 * phy buffer and sends them in a batch.
 *
 * The packet layer and physical layer defines several special chars for
 * various purpose, when a transaction layer byte hits one of these special
 * chars, it should be escaped. The escape rule is, "Escape char first,
 * following the byte XOR'ed with 0x20".
 *
 * This macro defines the max possible length of the phy data. In the worst
 * case, all transaction layer bytes need to be escaped (so the data length
 * doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally
 * we should make sure the length is aligned to SPI BPW.
 */
#define PHY_TX_MAX		ALIGN(2 * TRANS_TX_MAX + 4, 4)

/*
 * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from slave, the max
 * length of the rx bit stream is unpredictable. So the driver reads the words
 * one by one, and parses each word immediately into transaction layer buffer.
 * Only one word length of phy buffer is used for rx.
 */
#define PHY_BUF_SIZE		PHY_TX_MAX
144 * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
146 * @spi: spi slave associated with this bridge.
147 * @word_len: bytes of word for spi transfer.
148 * @trans_len: length of valid data in trans_buf.
149 * @phy_len: length of valid data in phy_buf.
150 * @trans_buf: the bridge buffer for transaction layer data.
151 * @phy_buf: the bridge buffer for physical layer data.
152 * @swap_words: the word swapping cb for phy data. NULL if not needed.
154 * As a device's registers are implemented on the AVMM bus address space, it
155 * requires the driver to issue formatted requests to spi slave to AVMM bus
156 * master bridge to perform register access.
158 struct spi_avmm_bridge
{
159 struct spi_device
*spi
;
160 unsigned char word_len
;
161 unsigned int trans_len
;
162 unsigned int phy_len
;
163 /* bridge buffer used in translation between protocol layers */
164 char trans_buf
[TRANS_BUF_SIZE
];
165 char phy_buf
[PHY_BUF_SIZE
];
166 void (*swap_words
)(void *buf
, unsigned int len
);
/* Swap the byte order of each 32-bit word in @buf; @len is in bytes. */
static void br_swap_words_32(void *buf, unsigned int len)
{
	swab32_array(buf, len / 4);
}
175 * Format transaction layer data in br->trans_buf according to the register
176 * access request, Store valid transaction layer data length in br->trans_len.
178 static int br_trans_tx_prepare(struct spi_avmm_bridge
*br
, bool is_read
, u32 reg
,
179 u32
*wr_val
, u32 count
)
181 struct trans_req_header
*header
;
182 unsigned int trans_len
;
189 code
= TRANS_CODE_READ
;
191 code
= TRANS_CODE_SEQ_READ
;
194 code
= TRANS_CODE_WRITE
;
196 code
= TRANS_CODE_SEQ_WRITE
;
199 header
= (struct trans_req_header
*)br
->trans_buf
;
202 header
->size
= cpu_to_be16((u16
)count
* SPI_AVMM_VAL_SIZE
);
203 header
->addr
= cpu_to_be32(reg
);
205 trans_len
= TRANS_REQ_HD_SIZE
;
208 trans_len
+= SPI_AVMM_VAL_SIZE
* count
;
209 if (trans_len
> sizeof(br
->trans_buf
))
212 data
= (__le32
*)(br
->trans_buf
+ TRANS_REQ_HD_SIZE
);
214 for (i
= 0; i
< count
; i
++)
215 *data
++ = cpu_to_le32(*wr_val
++);
218 /* Store valid trans data length for next layer */
219 br
->trans_len
= trans_len
;
225 * Convert transaction layer data (in br->trans_buf) to phy layer data, store
226 * them in br->phy_buf. Pad the phy_buf aligned with SPI's BPW. Store valid phy
227 * layer data length in br->phy_len.
229 * phy_buf len should be aligned with SPI's BPW. Spare bytes should be padded
230 * with PHY_IDLE, then the slave will just drop them.
232 * The driver will not simply pad 4a at the tail. The concern is that driver
233 * will not store MISO data during tx phase, if the driver pads 4a at the tail,
234 * it is possible that if the slave is fast enough to response at the padding
235 * time. As a result these rx bytes are lost. In the following case, 7a,7c,00
237 * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
238 * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
240 * So the driver moves EOP and bytes after EOP to the end of the aligned size,
241 * then fill the hole with PHY_IDLE. As following:
242 * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
243 * after pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
244 * Then if the slave will not get the entire packet before the tx phase is
245 * over, it can't responsed to anything either.
247 static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge
*br
)
249 char *tb
, *tb_end
, *pb
, *pb_limit
, *pb_eop
= NULL
;
250 unsigned int aligned_phy_len
, move_size
;
251 bool need_esc
= false;
254 tb_end
= tb
+ br
->trans_len
;
256 pb_limit
= pb
+ ARRAY_SIZE(br
->phy_buf
);
261 * The driver doesn't support multiple channels so the channel number
267 for (; pb
< pb_limit
&& tb
< tb_end
; pb
++) {
274 /* EOP should be inserted before the last valid char */
275 if (tb
== tb_end
- 1 && !pb_eop
) {
282 * insert an ESCAPE char if the data value equals any special
304 /* The phy buffer is used out but transaction layer data remains */
308 /* Store valid phy data length for spi transfer */
309 br
->phy_len
= pb
- br
->phy_buf
;
311 if (br
->word_len
== 1)
314 /* Do phy buf padding if word_len > 1 byte. */
315 aligned_phy_len
= ALIGN(br
->phy_len
, br
->word_len
);
316 if (aligned_phy_len
> sizeof(br
->phy_buf
))
319 if (aligned_phy_len
== br
->phy_len
)
322 /* move EOP and bytes after EOP to the end of aligned size */
323 move_size
= pb
- pb_eop
;
324 memmove(&br
->phy_buf
[aligned_phy_len
- move_size
], pb_eop
, move_size
);
326 /* fill the hole with PHY_IDLEs */
327 memset(pb_eop
, PHY_IDLE
, aligned_phy_len
- br
->phy_len
);
329 /* update the phy data length */
330 br
->phy_len
= aligned_phy_len
;
336 * In tx phase, the slave only returns PHY_IDLE (0x4a). So the driver will
337 * ignore rx in tx phase.
339 static int br_do_tx(struct spi_avmm_bridge
*br
)
341 /* reorder words for spi transfer */
343 br
->swap_words(br
->phy_buf
, br
->phy_len
);
345 /* send all data in phy_buf */
346 return spi_write(br
->spi
, br
->phy_buf
, br
->phy_len
);
350 * This function read the rx byte stream from SPI word by word and convert
351 * them to transaction layer data in br->trans_buf. It also stores the length
352 * of rx transaction layer data in br->trans_len
354 * The slave may send an unknown number of PHY_IDLEs in rx phase, so we cannot
355 * prepare a fixed length buffer to receive all of the rx data in a batch. We
356 * have to read word by word and convert them to transaction layer data at
359 static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge
*br
)
361 bool eop_found
= false, channel_found
= false, esc_found
= false;
362 bool valid_word
= false, last_try
= false;
363 struct device
*dev
= &br
->spi
->dev
;
364 char *pb
, *tb_limit
, *tb
= NULL
;
365 unsigned long poll_timeout
;
368 tb_limit
= br
->trans_buf
+ ARRAY_SIZE(br
->trans_buf
);
370 poll_timeout
= jiffies
+ SPI_AVMM_XFER_TIMEOUT
;
371 while (tb
< tb_limit
) {
372 ret
= spi_read(br
->spi
, pb
, br
->word_len
);
376 /* reorder the word back */
378 br
->swap_words(pb
, br
->word_len
);
381 for (i
= 0; i
< br
->word_len
; i
++) {
382 /* drop everything before first SOP */
383 if (!tb
&& pb
[i
] != PKT_SOP
)
387 if (pb
[i
] == PHY_IDLE
)
393 * We don't support multiple channels, so error out if
394 * a non-zero channel number is found.
398 dev_err(dev
, "%s channel num != 0\n",
403 channel_found
= false;
410 * reset the parsing if a second SOP appears.
414 channel_found
= false;
419 * No special char is expected after ESC char.
420 * No special char (except ESC & PHY_IDLE) is
421 * expected after EOP char.
423 * The special chars are all dropped.
425 if (esc_found
|| eop_found
)
431 if (esc_found
|| eop_found
)
434 channel_found
= true;
444 /* Record the normal byte in trans_buf. */
446 *tb
++ = pb
[i
] ^ 0x20;
453 * We get the last normal byte after EOP, it is
454 * time we finish. Normally the function should
458 br
->trans_len
= tb
- br
->trans_buf
;
465 /* update poll timeout when we get valid word */
466 poll_timeout
= jiffies
+ SPI_AVMM_XFER_TIMEOUT
;
470 * We timeout when rx keeps invalid for some time. But
471 * it is possible we are scheduled out for long time
472 * after a spi_read. So when we are scheduled in, a SW
473 * timeout happens. But actually HW may have worked fine and
474 * has been ready long time ago. So we need to do an extra
475 * read, if we get a valid word then we could continue rx,
476 * otherwise real a HW issue happens.
481 if (time_after(jiffies
, poll_timeout
))
487 * We have used out all transfer layer buffer but cannot find the end
488 * of the byte stream.
490 dev_err(dev
, "%s transfer buffer is full but rx doesn't end\n",
497 * For read transactions, the avmm bus will directly return register values
498 * without transaction response header.
500 static int br_rd_trans_rx_parse(struct spi_avmm_bridge
*br
,
501 u32
*val
, unsigned int expected_count
)
503 unsigned int i
, trans_len
= br
->trans_len
;
506 if (expected_count
* SPI_AVMM_VAL_SIZE
!= trans_len
)
509 data
= (__le32
*)br
->trans_buf
;
510 for (i
= 0; i
< expected_count
; i
++)
511 *val
++ = le32_to_cpu(*data
++);
517 * For write transactions, the slave will return a transaction response
520 static int br_wr_trans_rx_parse(struct spi_avmm_bridge
*br
,
521 unsigned int expected_count
)
523 unsigned int trans_len
= br
->trans_len
;
524 struct trans_resp_header
*resp
;
528 if (trans_len
!= TRANS_RESP_HD_SIZE
)
531 resp
= (struct trans_resp_header
*)br
->trans_buf
;
533 code
= resp
->r_code
^ 0x80;
534 val_len
= be16_to_cpu(resp
->size
);
535 if (!val_len
|| val_len
!= expected_count
* SPI_AVMM_VAL_SIZE
)
538 /* error out if the trans code doesn't align with the val size */
539 if ((val_len
== SPI_AVMM_VAL_SIZE
&& code
!= TRANS_CODE_WRITE
) ||
540 (val_len
> SPI_AVMM_VAL_SIZE
&& code
!= TRANS_CODE_SEQ_WRITE
))
546 static int do_reg_access(void *context
, bool is_read
, unsigned int reg
,
547 unsigned int *value
, unsigned int count
)
549 struct spi_avmm_bridge
*br
= context
;
552 /* invalidate bridge buffers first */
556 ret
= br_trans_tx_prepare(br
, is_read
, reg
, value
, count
);
560 ret
= br_pkt_phy_tx_prepare(br
);
568 ret
= br_do_rx_and_pkt_phy_parse(br
);
573 return br_rd_trans_rx_parse(br
, value
, count
);
575 return br_wr_trans_rx_parse(br
, count
);
578 static int regmap_spi_avmm_gather_write(void *context
,
579 const void *reg_buf
, size_t reg_len
,
580 const void *val_buf
, size_t val_len
)
582 if (reg_len
!= SPI_AVMM_REG_SIZE
)
585 if (!IS_ALIGNED(val_len
, SPI_AVMM_VAL_SIZE
))
588 return do_reg_access(context
, false, *(u32
*)reg_buf
, (u32
*)val_buf
,
589 val_len
/ SPI_AVMM_VAL_SIZE
);
592 static int regmap_spi_avmm_write(void *context
, const void *data
, size_t bytes
)
594 if (bytes
< SPI_AVMM_REG_SIZE
+ SPI_AVMM_VAL_SIZE
)
597 return regmap_spi_avmm_gather_write(context
, data
, SPI_AVMM_REG_SIZE
,
598 data
+ SPI_AVMM_REG_SIZE
,
599 bytes
- SPI_AVMM_REG_SIZE
);
602 static int regmap_spi_avmm_read(void *context
,
603 const void *reg_buf
, size_t reg_len
,
604 void *val_buf
, size_t val_len
)
606 if (reg_len
!= SPI_AVMM_REG_SIZE
)
609 if (!IS_ALIGNED(val_len
, SPI_AVMM_VAL_SIZE
))
612 return do_reg_access(context
, true, *(u32
*)reg_buf
, val_buf
,
613 (val_len
/ SPI_AVMM_VAL_SIZE
));
616 static struct spi_avmm_bridge
*
617 spi_avmm_bridge_ctx_gen(struct spi_device
*spi
)
619 struct spi_avmm_bridge
*br
;
622 return ERR_PTR(-ENODEV
);
624 /* Only support BPW == 8 or 32 now. Try 32 BPW first. */
625 spi
->mode
= SPI_MODE_1
;
626 spi
->bits_per_word
= 32;
627 if (spi_setup(spi
)) {
628 spi
->bits_per_word
= 8;
630 return ERR_PTR(-EINVAL
);
633 br
= kzalloc(sizeof(*br
), GFP_KERNEL
);
635 return ERR_PTR(-ENOMEM
);
638 br
->word_len
= spi
->bits_per_word
/ 8;
639 if (br
->word_len
== 4) {
641 * The protocol requires little endian byte order but MSB
642 * first. So driver needs to swap the byte order word by word
643 * if word length > 1.
645 br
->swap_words
= br_swap_words_32
;
/* Free the bridge context allocated by spi_avmm_bridge_ctx_gen(). */
static void spi_avmm_bridge_ctx_free(void *context)
{
	kfree(context);
}
656 static const struct regmap_bus regmap_spi_avmm_bus
= {
657 .write
= regmap_spi_avmm_write
,
658 .gather_write
= regmap_spi_avmm_gather_write
,
659 .read
= regmap_spi_avmm_read
,
660 .reg_format_endian_default
= REGMAP_ENDIAN_NATIVE
,
661 .val_format_endian_default
= REGMAP_ENDIAN_NATIVE
,
662 .max_raw_read
= SPI_AVMM_VAL_SIZE
* MAX_READ_CNT
,
663 .max_raw_write
= SPI_AVMM_VAL_SIZE
* MAX_WRITE_CNT
,
664 .free_context
= spi_avmm_bridge_ctx_free
,
667 struct regmap
*__regmap_init_spi_avmm(struct spi_device
*spi
,
668 const struct regmap_config
*config
,
669 struct lock_class_key
*lock_key
,
670 const char *lock_name
)
672 struct spi_avmm_bridge
*bridge
;
675 bridge
= spi_avmm_bridge_ctx_gen(spi
);
677 return ERR_CAST(bridge
);
679 map
= __regmap_init(&spi
->dev
, ®map_spi_avmm_bus
,
680 bridge
, config
, lock_key
, lock_name
);
682 spi_avmm_bridge_ctx_free(bridge
);
683 return ERR_CAST(map
);
688 EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm
);
690 struct regmap
*__devm_regmap_init_spi_avmm(struct spi_device
*spi
,
691 const struct regmap_config
*config
,
692 struct lock_class_key
*lock_key
,
693 const char *lock_name
)
695 struct spi_avmm_bridge
*bridge
;
698 bridge
= spi_avmm_bridge_ctx_gen(spi
);
700 return ERR_CAST(bridge
);
702 map
= __devm_regmap_init(&spi
->dev
, ®map_spi_avmm_bus
,
703 bridge
, config
, lock_key
, lock_name
);
705 spi_avmm_bridge_ctx_free(bridge
);
706 return ERR_CAST(map
);
711 EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm
);
713 MODULE_DESCRIPTION("Register map access API - SPI AVMM support");
714 MODULE_LICENSE("GPL v2");