1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* NVIDIA Tegra SPI controller (T114 and later) */
#include <arch/cache.h>
#include <assert.h>
#include <console/console.h>
#include <delay.h>
#include <device/mmio.h>
#include <soc/addressmap.h>
#include <soc/dma.h>
#include <soc/spi.h>
#include <spi-generic.h>
#include <spi_flash.h>
#include <timer.h>
#include <types.h>
/*
 * Debug trace macro: expands to a printk() when CONFIG_DEBUG_SPI is enabled,
 * and to nothing otherwise.  ##__VA_ARGS__ (GNU extension, used throughout
 * coreboot) forwards the format arguments and swallows the trailing comma
 * when there are none; the original discarded the arguments entirely.
 */
#if defined(CONFIG_DEBUG_SPI) && CONFIG_DEBUG_SPI
# define DEBUG_SPI(x, ...)	printk(BIOS_DEBUG, "TEGRA_SPI: " x, ##__VA_ARGS__)
#else
# define DEBUG_SPI(x, ...)
#endif
/*
 * 64 packets in FIFO mode, BLOCK_SIZE packets in DMA mode. Packets can vary
 * in size from 4 to 32 bits. To keep things simple we'll use 8-bit packets.
 */
#define SPI_PACKET_SIZE_BYTES		1
#define SPI_MAX_TRANSFER_BYTES_FIFO	(64 * SPI_PACKET_SIZE_BYTES)
#define SPI_MAX_TRANSFER_BYTES_DMA	(65535 * SPI_PACKET_SIZE_BYTES)
/*
 * This is used to workaround an issue seen where it may take some time for
 * packets to show up in the FIFO after they have been received and the
 * BLOCK_COUNT has been incremented.
 */
#define SPI_FIFO_XFER_TIMEOUT_US	1000
/* SPI_COMMAND1 register bit fields. */
/* 1U for bit 31: left-shifting 1 (signed int) into the sign bit is UB. */
#define SPI_CMD1_GO			(1U << 31)
#define SPI_CMD1_M_S			(1 << 30)
#define SPI_CMD1_MODE_MASK		0x3
#define SPI_CMD1_MODE_SHIFT		28
#define SPI_CMD1_CS_SEL_MASK		0x3
#define SPI_CMD1_CS_SEL_SHIFT		26
#define SPI_CMD1_CS_POL_INACTIVE3	(1 << 25)
#define SPI_CMD1_CS_POL_INACTIVE2	(1 << 24)
#define SPI_CMD1_CS_POL_INACTIVE1	(1 << 23)
#define SPI_CMD1_CS_POL_INACTIVE0	(1 << 22)
#define SPI_CMD1_CS_SW_HW		(1 << 21)
#define SPI_CMD1_CS_SW_VAL		(1 << 20)
#define SPI_CMD1_IDLE_SDA_MASK		0x3
#define SPI_CMD1_IDLE_SDA_SHIFT		18
#define SPI_CMD1_BIDIR			(1 << 17)
#define SPI_CMD1_LSBI_FE		(1 << 16)
#define SPI_CMD1_LSBY_FE		(1 << 15)
#define SPI_CMD1_BOTH_EN_BIT		(1 << 14)
#define SPI_CMD1_BOTH_EN_BYTE		(1 << 13)
#define SPI_CMD1_RX_EN			(1 << 12)
#define SPI_CMD1_TX_EN			(1 << 11)
#define SPI_CMD1_PACKED			(1 << 5)
#define SPI_CMD1_BIT_LEN_MASK		0x1f
#define SPI_CMD1_BIT_LEN_SHIFT		0
/* SPI_COMMAND2 register bit fields (Tx/Rx clock tap delays). */
#define SPI_CMD2_TX_CLK_TAP_DELAY	(1 << 6)
#define SPI_CMD2_TX_CLK_TAP_DELAY_MASK	(0x3F << 6)
#define SPI_CMD2_RX_CLK_TAP_DELAY	(1 << 0)
#define SPI_CMD2_RX_CLK_TAP_DELAY_MASK	(0x3F << 0)
/* SPI_TRANS_STATUS */
#define SPI_STATUS_RDY			(1 << 30)
#define SPI_STATUS_SLV_IDLE_COUNT_MASK	0xff
#define SPI_STATUS_SLV_IDLE_COUNT_SHIFT	16
#define SPI_STATUS_BLOCK_COUNT		0xffff
#define SPI_STATUS_BLOCK_COUNT_SHIFT	0
/* SPI_FIFO_STATUS register bit fields. */
/* 1U for bit 31: left-shifting 1 (signed int) into the sign bit is UB. */
#define SPI_FIFO_STATUS_CS_INACTIVE			(1U << 31)
#define SPI_FIFO_STATUS_FRAME_END			(1 << 30)
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK		0x7f
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT	23
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_MASK	0x7f
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_SHIFT	16
#define SPI_FIFO_STATUS_RX_FIFO_FLUSH			(1 << 15)
#define SPI_FIFO_STATUS_TX_FIFO_FLUSH			(1 << 14)
#define SPI_FIFO_STATUS_ERR				(1 << 8)
#define SPI_FIFO_STATUS_TX_FIFO_OVF			(1 << 7)
#define SPI_FIFO_STATUS_TX_FIFO_UNR			(1 << 6)
#define SPI_FIFO_STATUS_RX_FIFO_OVF			(1 << 5)
#define SPI_FIFO_STATUS_RX_FIFO_UNR			(1 << 4)
#define SPI_FIFO_STATUS_TX_FIFO_FULL			(1 << 3)
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY			(1 << 2)
#define SPI_FIFO_STATUS_RX_FIFO_FULL			(1 << 1)
#define SPI_FIFO_STATUS_RX_FIFO_EMPTY			(1 << 0)
/* SPI_DMA_CTL register bit fields. */
/* 1U for bit 31: left-shifting 1 (signed int) into the sign bit is UB. */
#define SPI_DMA_CTL_DMA			(1U << 31)
#define SPI_DMA_CTL_CONT		(1 << 30)
#define SPI_DMA_CTL_IE_RX		(1 << 29)
#define SPI_DMA_CTL_IE_TX		(1 << 28)
#define SPI_DMA_CTL_RX_TRIG_MASK	0x3
#define SPI_DMA_CTL_RX_TRIG_SHIFT	19
#define SPI_DMA_CTL_TX_TRIG_MASK	0x3
#define SPI_DMA_CTL_TX_TRIG_SHIFT	15

/* SPI_DMA_BLK: BLOCK_SIZE is in units of packets, encoded as n-1. */
#define SPI_DMA_CTL_BLOCK_SIZE_MASK	0xffff
#define SPI_DMA_CTL_BLOCK_SIZE_SHIFT	0
110 static struct tegra_spi_channel tegra_spi_channels
[] = {
112 * Note: Tegra pinmux must be setup for corresponding SPI channel in
113 * order for its registers to be accessible. If pinmux has not been
114 * set up, access to the channel's registers will simply hang.
116 * TODO(dhendrix): Clarify or remove this comment (is clock setup
117 * necessary first, or just pinmux, or both?)
120 .slave
= { .bus
= 1, },
121 .regs
= (struct tegra_spi_regs
*)TEGRA_SPI1_BASE
,
122 .req_sel
= APBDMA_SLAVE_SL2B1
,
125 .slave
= { .bus
= 2, },
126 .regs
= (struct tegra_spi_regs
*)TEGRA_SPI2_BASE
,
127 .req_sel
= APBDMA_SLAVE_SL2B2
,
130 .slave
= { .bus
= 3, },
131 .regs
= (struct tegra_spi_regs
*)TEGRA_SPI3_BASE
,
132 .req_sel
= APBDMA_SLAVE_SL2B3
,
135 .slave
= { .bus
= 4, },
136 .regs
= (struct tegra_spi_regs
*)TEGRA_SPI4_BASE
,
137 .req_sel
= APBDMA_SLAVE_SL2B4
,
140 .slave
= { .bus
= 5, },
141 .regs
= (struct tegra_spi_regs
*)TEGRA_SPI5_BASE
,
142 .req_sel
= APBDMA_SLAVE_SL2B5
,
145 .slave
= { .bus
= 6, },
146 .regs
= (struct tegra_spi_regs
*)TEGRA_SPI6_BASE
,
147 .req_sel
= APBDMA_SLAVE_SL2B6
,
156 struct tegra_spi_channel
*tegra_spi_init(unsigned int bus
)
159 struct tegra_spi_channel
*spi
= NULL
;
161 for (i
= 0; i
< ARRAY_SIZE(tegra_spi_channels
); i
++) {
162 if (tegra_spi_channels
[i
].slave
.bus
== bus
) {
163 spi
= &tegra_spi_channels
[i
];
170 /* software drives chip-select, set value to high */
171 setbits32(&spi
->regs
->command1
,
172 SPI_CMD1_CS_SW_HW
| SPI_CMD1_CS_SW_VAL
);
174 /* 8-bit transfers, unpacked mode, most significant bit first */
175 clrbits32(&spi
->regs
->command1
,
176 SPI_CMD1_BIT_LEN_MASK
| SPI_CMD1_PACKED
);
177 setbits32(&spi
->regs
->command1
, 7 << SPI_CMD1_BIT_LEN_SHIFT
);
182 static struct tegra_spi_channel
* const to_tegra_spi(int bus
) {
183 return &tegra_spi_channels
[bus
- 1];
/*
 * Return the SPI bus clock rate in Hz for @bus.
 *
 * FIXME: implement this properly, for now use max value (50MHz).
 */
static unsigned int tegra_spi_speed(unsigned int bus)
{
	return 50000000;
}
192 static int spi_ctrlr_claim_bus(const struct spi_slave
*slave
)
194 struct tegra_spi_regs
*regs
= to_tegra_spi(slave
->bus
)->regs
;
197 tegra_spi_init(slave
->bus
);
199 val
= read32(®s
->command1
);
201 /* select appropriate chip-select line */
202 val
&= ~(SPI_CMD1_CS_SEL_MASK
<< SPI_CMD1_CS_SEL_SHIFT
);
203 val
|= (slave
->cs
<< SPI_CMD1_CS_SEL_SHIFT
);
205 /* drive chip-select with the inverse of the "inactive" value */
206 if (val
& (SPI_CMD1_CS_POL_INACTIVE0
<< slave
->cs
))
207 val
&= ~SPI_CMD1_CS_SW_VAL
;
209 val
|= SPI_CMD1_CS_SW_VAL
;
211 write32(®s
->command1
, val
);
215 static void spi_ctrlr_release_bus(const struct spi_slave
*slave
)
217 struct tegra_spi_regs
*regs
= to_tegra_spi(slave
->bus
)->regs
;
220 val
= read32(®s
->command1
);
222 if (val
& (SPI_CMD1_CS_POL_INACTIVE0
<< slave
->cs
))
223 val
|= SPI_CMD1_CS_SW_VAL
;
225 val
&= ~SPI_CMD1_CS_SW_VAL
;
227 write32(®s
->command1
, val
);
230 static void dump_fifo_status(struct tegra_spi_channel
*spi
)
232 u32 status
= read32(&spi
->regs
->fifo_status
);
234 printk(BIOS_INFO
, "Raw FIFO status: 0x%08x\n", status
);
235 if (status
& SPI_FIFO_STATUS_TX_FIFO_OVF
)
236 printk(BIOS_INFO
, "\tTx overflow detected\n");
237 if (status
& SPI_FIFO_STATUS_TX_FIFO_UNR
)
238 printk(BIOS_INFO
, "\tTx underrun detected\n");
239 if (status
& SPI_FIFO_STATUS_RX_FIFO_OVF
)
240 printk(BIOS_INFO
, "\tRx overflow detected\n");
241 if (status
& SPI_FIFO_STATUS_RX_FIFO_UNR
)
242 printk(BIOS_INFO
, "\tRx underrun detected\n");
244 printk(BIOS_INFO
, "TX_FIFO: 0x%08x, TX_DATA: 0x%08x\n",
245 read32(&spi
->regs
->tx_fifo
), read32(&spi
->regs
->tx_data
));
246 printk(BIOS_INFO
, "RX_FIFO: 0x%08x, RX_DATA: 0x%08x\n",
247 read32(&spi
->regs
->rx_fifo
), read32(&spi
->regs
->rx_data
));
250 static void clear_fifo_status(struct tegra_spi_channel
*spi
)
252 clrbits32(&spi
->regs
->fifo_status
,
253 SPI_FIFO_STATUS_ERR
|
254 SPI_FIFO_STATUS_TX_FIFO_OVF
|
255 SPI_FIFO_STATUS_TX_FIFO_UNR
|
256 SPI_FIFO_STATUS_RX_FIFO_OVF
|
257 SPI_FIFO_STATUS_RX_FIFO_UNR
);
260 static void dump_spi_regs(struct tegra_spi_channel
*spi
)
262 printk(BIOS_INFO
, "SPI regs:\n"
263 "\tdma_blk: 0x%08x\n"
264 "\tcommand1: 0x%08x\n"
265 "\tdma_ctl: 0x%08x\n"
266 "\ttrans_status: 0x%08x\n",
267 read32(&spi
->regs
->dma_blk
),
268 read32(&spi
->regs
->command1
),
269 read32(&spi
->regs
->dma_ctl
),
270 read32(&spi
->regs
->trans_status
));
273 static void dump_dma_regs(struct apb_dma_channel
*dma
)
278 printk(BIOS_INFO
, "DMA regs:\n"
279 "\tahb_ptr: 0x%08x\n"
280 "\tapb_ptr: 0x%08x\n"
281 "\tahb_seq: 0x%08x\n"
282 "\tapb_seq: 0x%08x\n"
286 "\tdma_byte_sta: 0x%08x\n"
287 "\tword_transfer: 0x%08x\n",
288 read32(&dma
->regs
->ahb_ptr
),
289 read32(&dma
->regs
->apb_ptr
),
290 read32(&dma
->regs
->ahb_seq
),
291 read32(&dma
->regs
->apb_seq
),
292 read32(&dma
->regs
->csr
),
293 read32(&dma
->regs
->csre
),
294 read32(&dma
->regs
->wcount
),
295 read32(&dma
->regs
->dma_byte_sta
),
296 read32(&dma
->regs
->word_transfer
));
299 static inline unsigned int spi_byte_count(struct tegra_spi_channel
*spi
)
301 /* FIXME: Make this take total packet size into account */
302 return read32(&spi
->regs
->trans_status
) &
303 (SPI_STATUS_BLOCK_COUNT
<< SPI_STATUS_BLOCK_COUNT_SHIFT
);
307 * This calls udelay() with a calculated value based on the SPI speed and
308 * number of bytes remaining to be transferred. It assumes that if the
309 * calculated delay period is less than MIN_DELAY_US then it is probably
310 * not worth the overhead of yielding.
312 #define MIN_DELAY_US 250
313 static void spi_delay(struct tegra_spi_channel
*spi
,
314 unsigned int bytes_remaining
)
316 unsigned int ns_per_byte
, delay_us
;
318 ns_per_byte
= 1000000000 / (tegra_spi_speed(spi
->slave
.bus
) / 8);
319 delay_us
= (ns_per_byte
* bytes_remaining
) / 1000;
321 if (delay_us
< MIN_DELAY_US
)
327 static void tegra_spi_wait(struct tegra_spi_channel
*spi
)
329 unsigned int count
, dma_blk
;
331 dma_blk
= 1 + (read32(&spi
->regs
->dma_blk
) &
332 (SPI_DMA_CTL_BLOCK_SIZE_MASK
<< SPI_DMA_CTL_BLOCK_SIZE_SHIFT
));
334 while ((count
= spi_byte_count(spi
)) != dma_blk
)
335 spi_delay(spi
, dma_blk
- count
);
338 static int fifo_error(struct tegra_spi_channel
*spi
)
340 return read32(&spi
->regs
->fifo_status
) & SPI_FIFO_STATUS_ERR
? 1 : 0;
343 static int tegra_spi_pio_prepare(struct tegra_spi_channel
*spi
,
344 unsigned int bytes
, enum spi_direction dir
)
346 u8
*p
= spi
->out_buf
;
347 unsigned int todo
= MIN(bytes
, SPI_MAX_TRANSFER_BYTES_FIFO
);
348 u32 flush_mask
, enable_mask
;
350 if (dir
== SPI_SEND
) {
351 flush_mask
= SPI_FIFO_STATUS_TX_FIFO_FLUSH
;
352 enable_mask
= SPI_CMD1_TX_EN
;
354 flush_mask
= SPI_FIFO_STATUS_RX_FIFO_FLUSH
;
355 enable_mask
= SPI_CMD1_RX_EN
;
358 setbits32(&spi
->regs
->fifo_status
, flush_mask
);
359 while (read32(&spi
->regs
->fifo_status
) & flush_mask
)
362 setbits32(&spi
->regs
->command1
, enable_mask
);
364 /* BLOCK_SIZE in SPI_DMA_BLK register applies to both DMA and
366 write32(&spi
->regs
->dma_blk
, todo
- 1);
368 if (dir
== SPI_SEND
) {
369 unsigned int to_fifo
= bytes
;
371 write32(&spi
->regs
->tx_fifo
, *p
);
380 static void tegra_spi_pio_start(struct tegra_spi_channel
*spi
)
382 setbits32(&spi
->regs
->trans_status
, SPI_STATUS_RDY
);
383 setbits32(&spi
->regs
->command1
, SPI_CMD1_GO
);
384 /* Make sure the write to command1 completes. */
385 read32(&spi
->regs
->command1
);
388 static inline u32
rx_fifo_count(struct tegra_spi_channel
*spi
)
390 return (read32(&spi
->regs
->fifo_status
) >>
391 SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT
) &
392 SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK
;
395 static int tegra_spi_pio_finish(struct tegra_spi_channel
*spi
)
400 clrbits32(&spi
->regs
->command1
, SPI_CMD1_RX_EN
| SPI_CMD1_TX_EN
);
403 * Allow some time in case the Rx FIFO does not yet have
404 * all packets pushed into it. See chrome-os-partner:24215.
406 stopwatch_init_usecs_expire(&sw
, SPI_FIFO_XFER_TIMEOUT_US
);
408 if (rx_fifo_count(spi
) == spi_byte_count(spi
))
410 } while (!stopwatch_expired(&sw
));
412 while (!(read32(&spi
->regs
->fifo_status
) &
413 SPI_FIFO_STATUS_RX_FIFO_EMPTY
)) {
414 *p
= read8(&spi
->regs
->rx_fifo
);
418 if (fifo_error(spi
)) {
419 printk(BIOS_ERR
, "%s: ERROR:\n", __func__
);
421 dump_fifo_status(spi
);
428 static void setup_dma_params(struct tegra_spi_channel
*spi
,
429 struct apb_dma_channel
*dma
)
431 /* APB bus width = 8-bits, address wrap for each word */
432 clrbits32(&dma
->regs
->apb_seq
,
433 APB_BUS_WIDTH_MASK
<< APB_BUS_WIDTH_SHIFT
);
434 /* AHB 1 word burst, bus width = 32 bits (fixed in hardware),
435 * no address wrapping */
436 clrsetbits32(&dma
->regs
->ahb_seq
,
437 (AHB_BURST_MASK
<< AHB_BURST_SHIFT
),
438 4 << AHB_BURST_SHIFT
);
440 /* Set ONCE mode to transfer one "block" at a time (64KB) and enable
442 clrbits32(&dma
->regs
->csr
,
443 APB_CSR_REQ_SEL_MASK
<< APB_CSR_REQ_SEL_SHIFT
);
444 setbits32(&dma
->regs
->csr
, APB_CSR_ONCE
| APB_CSR_FLOW
|
445 (spi
->req_sel
<< APB_CSR_REQ_SEL_SHIFT
));
448 static int tegra_spi_dma_prepare(struct tegra_spi_channel
*spi
,
449 unsigned int bytes
, enum spi_direction dir
)
451 unsigned int todo
, wcount
;
454 * For DMA we need to think of things in terms of word count.
455 * AHB width is fixed at 32-bits. To avoid overrunning
456 * the in/out buffers we must align down. (Note: lowest 2-bits
457 * in WCOUNT register are ignored, and WCOUNT seems to count
458 * words starting at n-1)
460 * Example: If "bytes" is 7 and we are transferring 1-byte at a time,
461 * WCOUNT should be 4. The remaining 3 bytes must be transferred
464 todo
= MIN(bytes
, SPI_MAX_TRANSFER_BYTES_DMA
- TEGRA_DMA_ALIGN_BYTES
);
465 todo
= ALIGN_DOWN(todo
, TEGRA_DMA_ALIGN_BYTES
);
466 wcount
= ALIGN_DOWN(todo
- TEGRA_DMA_ALIGN_BYTES
, TEGRA_DMA_ALIGN_BYTES
);
468 if (dir
== SPI_SEND
) {
469 spi
->dma_out
= dma_claim();
473 /* ensure bytes to send will be visible to DMA controller */
474 dcache_clean_by_mva(spi
->out_buf
, bytes
);
476 write32(&spi
->dma_out
->regs
->apb_ptr
,
477 (u32
)&spi
->regs
->tx_fifo
);
478 write32(&spi
->dma_out
->regs
->ahb_ptr
, (u32
)spi
->out_buf
);
479 setbits32(&spi
->dma_out
->regs
->csr
, APB_CSR_DIR
);
480 setup_dma_params(spi
, spi
->dma_out
);
481 write32(&spi
->dma_out
->regs
->wcount
, wcount
);
483 spi
->dma_in
= dma_claim();
487 /* avoid data collisions */
488 dcache_clean_invalidate_by_mva(spi
->in_buf
, bytes
);
490 write32(&spi
->dma_in
->regs
->apb_ptr
, (u32
)&spi
->regs
->rx_fifo
);
491 write32(&spi
->dma_in
->regs
->ahb_ptr
, (u32
)spi
->in_buf
);
492 clrbits32(&spi
->dma_in
->regs
->csr
, APB_CSR_DIR
);
493 setup_dma_params(spi
, spi
->dma_in
);
494 write32(&spi
->dma_in
->regs
->wcount
, wcount
);
497 /* BLOCK_SIZE starts at n-1 */
498 write32(&spi
->regs
->dma_blk
, todo
- 1);
502 static void tegra_spi_dma_start(struct tegra_spi_channel
*spi
)
505 * The RDY bit in SPI_TRANS_STATUS needs to be cleared manually
506 * (set bit to clear) between each transaction. Otherwise the next
507 * transaction does not start.
509 setbits32(&spi
->regs
->trans_status
, SPI_STATUS_RDY
);
512 setbits32(&spi
->regs
->command1
, SPI_CMD1_TX_EN
);
514 setbits32(&spi
->regs
->command1
, SPI_CMD1_RX_EN
);
517 * To avoid underrun conditions, enable APB DMA before SPI DMA for
518 * Tx and enable SPI DMA before APB DMA before Rx.
521 dma_start(spi
->dma_out
);
522 setbits32(&spi
->regs
->dma_ctl
, SPI_DMA_CTL_DMA
);
524 dma_start(spi
->dma_in
);
527 static int tegra_spi_dma_finish(struct tegra_spi_channel
*spi
)
533 todo
= read32(&spi
->dma_in
->regs
->wcount
);
535 while ((read32(&spi
->dma_in
->regs
->dma_byte_sta
) < todo
) ||
536 dma_busy(spi
->dma_in
))
537 ; /* this shouldn't take long, no udelay */
538 dma_stop(spi
->dma_in
);
539 clrbits32(&spi
->regs
->command1
, SPI_CMD1_RX_EN
);
540 dma_release(spi
->dma_in
);
544 todo
= read32(&spi
->dma_out
->regs
->wcount
);
546 while ((read32(&spi
->dma_out
->regs
->dma_byte_sta
) < todo
) ||
547 dma_busy(spi
->dma_out
)) {
548 spi_delay(spi
, todo
- spi_byte_count(spi
));
550 clrbits32(&spi
->regs
->command1
, SPI_CMD1_TX_EN
);
551 dma_stop(spi
->dma_out
);
552 dma_release(spi
->dma_out
);
555 if (fifo_error(spi
)) {
556 printk(BIOS_ERR
, "%s: ERROR:\n", __func__
);
557 dump_dma_regs(spi
->dma_out
);
558 dump_dma_regs(spi
->dma_in
);
560 dump_fifo_status(spi
);
573 * xfer_setup() prepares a transfer. It does sanity checking, alignment, and
574 * sets transfer mode used by this channel (if not set already).
576 * A few caveats to watch out for:
577 * - The number of bytes which can be transferred may be smaller than the
578 * number of bytes the caller specifies. The number of bytes ready for
579 * a transfer will be returned (unless an error occurs).
581 * - Only one mode can be used for both RX and TX. The transfer mode of the
582 * SPI channel (spi->xfer_mode) is checked each time this function is called.
583 * If conflicting modes are detected, spi->xfer_mode will be set to
584 * XFER_MODE_NONE and an error will be returned.
586 * Returns bytes ready for transfer if successful, <0 to indicate error.
588 static int xfer_setup(struct tegra_spi_channel
*spi
, void *buf
,
589 unsigned int bytes
, enum spi_direction dir
)
591 unsigned int line_size
= dcache_line_bytes();
600 else if (dir
== SPI_RECEIVE
)
604 * Alignment consideratons:
605 * When we enable caching we'll need to clean/invalidate portions of
606 * memory. So we need to be careful about memory alignment. Also, DMA
607 * likes to operate on 4-bytes at a time on the AHB side. So for
608 * example, if we only want to receive 1 byte, 4 bytes will be
609 * written in memory even if those extra 3 bytes are beyond the length
612 * For now we'll use PIO to send/receive unaligned bytes. We may
613 * consider setting aside some space for a kind of bounce buffer to
614 * stay in DMA mode once we have a chance to benchmark the two
618 if (bytes
< line_size
) {
619 if (spi
->xfer_mode
== XFER_MODE_DMA
) {
620 spi
->xfer_mode
= XFER_MODE_NONE
;
623 spi
->xfer_mode
= XFER_MODE_PIO
;
624 ret
= tegra_spi_pio_prepare(spi
, bytes
, dir
);
629 /* transfer bytes before the aligned boundary */
630 align
= line_size
- ((uintptr_t)buf
% line_size
);
631 if ((align
!= 0) && (align
!= line_size
)) {
632 if (spi
->xfer_mode
== XFER_MODE_DMA
) {
633 spi
->xfer_mode
= XFER_MODE_NONE
;
636 spi
->xfer_mode
= XFER_MODE_PIO
;
637 ret
= tegra_spi_pio_prepare(spi
, align
, dir
);
642 /* do aligned DMA transfer */
643 align
= (((uintptr_t)buf
+ bytes
) % line_size
);
644 if (bytes
- align
> 0) {
645 unsigned int dma_bytes
= bytes
- align
;
647 if (spi
->xfer_mode
== XFER_MODE_PIO
) {
648 spi
->xfer_mode
= XFER_MODE_NONE
;
651 spi
->xfer_mode
= XFER_MODE_DMA
;
652 ret
= tegra_spi_dma_prepare(spi
, dma_bytes
, dir
);
658 /* transfer any remaining unaligned bytes */
660 if (spi
->xfer_mode
== XFER_MODE_DMA
) {
661 spi
->xfer_mode
= XFER_MODE_NONE
;
664 spi
->xfer_mode
= XFER_MODE_PIO
;
665 ret
= tegra_spi_pio_prepare(spi
, align
, dir
);
674 static void xfer_start(struct tegra_spi_channel
*spi
)
676 if (spi
->xfer_mode
== XFER_MODE_DMA
)
677 tegra_spi_dma_start(spi
);
679 tegra_spi_pio_start(spi
);
/* Block until the controller reports the programmed block is transferred. */
static void xfer_wait(struct tegra_spi_channel *spi)
{
	tegra_spi_wait(spi);
}
687 static int xfer_finish(struct tegra_spi_channel
*spi
)
691 if (spi
->xfer_mode
== XFER_MODE_DMA
)
692 ret
= tegra_spi_dma_finish(spi
);
694 ret
= tegra_spi_pio_finish(spi
);
696 spi
->xfer_mode
= XFER_MODE_NONE
;
700 static int spi_ctrlr_xfer(const struct spi_slave
*slave
, const void *dout
,
701 size_t out_bytes
, void *din
, size_t in_bytes
)
703 struct tegra_spi_channel
*spi
= to_tegra_spi(slave
->bus
);
704 u8
*out_buf
= (u8
*)dout
;
705 u8
*in_buf
= (u8
*)din
;
709 /* tegra bus numbers start at 1 */
710 ASSERT(slave
->bus
>= 1 && slave
->bus
<= ARRAY_SIZE(tegra_spi_channels
));
712 while (out_bytes
|| in_bytes
) {
717 else if (in_bytes
== 0)
720 todo
= MIN(out_bytes
, in_bytes
);
723 x
= xfer_setup(spi
, out_buf
, todo
, SPI_SEND
);
725 if (spi
->xfer_mode
== XFER_MODE_NONE
) {
726 spi
->xfer_mode
= XFER_MODE_PIO
;
735 x
= xfer_setup(spi
, in_buf
, todo
, SPI_RECEIVE
);
737 if (spi
->xfer_mode
== XFER_MODE_NONE
) {
738 spi
->xfer_mode
= XFER_MODE_PIO
;
748 * Note: Some devices (such as Chrome EC) are sensitive to
749 * delays, so be careful when adding debug prints not to
750 * cause timeouts between transfers.
754 if (xfer_finish(spi
)) {
759 /* Post-processing. */
771 printk(BIOS_ERR
, "%s: Error detected\n", __func__
);
772 printk(BIOS_ERR
, "Transaction size: %u, bytes remaining: "
773 "%u out / %u in\n", todo
, out_bytes
, in_bytes
);
774 clear_fifo_status(spi
);
779 static const struct spi_ctrlr spi_ctrlr
= {
780 .claim_bus
= spi_ctrlr_claim_bus
,
781 .release_bus
= spi_ctrlr_release_bus
,
782 .xfer
= spi_ctrlr_xfer
,
783 .max_xfer_size
= SPI_CTRLR_DEFAULT_MAX_XFER_SIZE
,
786 const struct spi_ctrlr_buses spi_ctrlr_bus_map
[] = {
790 .bus_end
= ARRAY_SIZE(tegra_spi_channels
)
/* Number of entries in the bus map, consumed by the generic SPI layer. */
const size_t spi_ctrlr_bus_map_count = ARRAY_SIZE(spi_ctrlr_bus_map);