/* SPDX-License-Identifier: GPL-2.0-only */
/* NVIDIA Tegra SPI controller (T114 and later) */

#include <arch/cache.h>
#include <device/mmio.h>
#include <assert.h>
#include <console/console.h>
#include <delay.h>
#include <soc/addressmap.h>
#include <soc/dma.h>
#include <soc/spi.h>
#include <spi-generic.h>
#include <spi_flash.h>
#include <timer.h>
#include <types.h>

#if defined(CONFIG_DEBUG_SPI) && CONFIG_DEBUG_SPI
# define DEBUG_SPI(x,...)        printk(BIOS_DEBUG, "TEGRA_SPI: " x)
#else
# define DEBUG_SPI(x,...)
#endif
/*
 * 64 packets in FIFO mode, BLOCK_SIZE packets in DMA mode. Packets can vary
 * in size from 4 to 32 bits. To keep things simple we'll use 8-bit packets.
 */
#define SPI_PACKET_SIZE_BYTES		1
#define SPI_MAX_TRANSFER_BYTES_FIFO	(64 * SPI_PACKET_SIZE_BYTES)
#define SPI_MAX_TRANSFER_BYTES_DMA	(65535 * SPI_PACKET_SIZE_BYTES)
/*
 * This is used to work around an issue seen where it may take some time for
 * packets to show up in the FIFO after they have been received and the
 * BLOCK_COUNT has been incremented.
 */
#define SPI_FIFO_XFER_TIMEOUT_US	1000
/* COMMAND1 */
#define SPI_CMD1_GO			(1 << 31)
#define SPI_CMD1_M_S			(1 << 30)
#define SPI_CMD1_MODE_MASK		0x3
#define SPI_CMD1_MODE_SHIFT		28
#define SPI_CMD1_CS_SEL_MASK		0x3
#define SPI_CMD1_CS_SEL_SHIFT		26
#define SPI_CMD1_CS_POL_INACTIVE3	(1 << 25)
#define SPI_CMD1_CS_POL_INACTIVE2	(1 << 24)
#define SPI_CMD1_CS_POL_INACTIVE1	(1 << 23)
#define SPI_CMD1_CS_POL_INACTIVE0	(1 << 22)
#define SPI_CMD1_CS_SW_HW		(1 << 21)
#define SPI_CMD1_CS_SW_VAL		(1 << 20)
#define SPI_CMD1_IDLE_SDA_MASK		0x3
#define SPI_CMD1_IDLE_SDA_SHIFT		18
#define SPI_CMD1_BIDIR			(1 << 17)
#define SPI_CMD1_LSBI_FE		(1 << 16)
#define SPI_CMD1_LSBY_FE		(1 << 15)
#define SPI_CMD1_BOTH_EN_BIT		(1 << 14)
#define SPI_CMD1_BOTH_EN_BYTE		(1 << 13)
#define SPI_CMD1_RX_EN			(1 << 12)
#define SPI_CMD1_TX_EN			(1 << 11)
#define SPI_CMD1_PACKED			(1 << 5)
#define SPI_CMD1_BIT_LEN_MASK		0x1f
#define SPI_CMD1_BIT_LEN_SHIFT		0

/* COMMAND2 */
#define SPI_CMD2_TX_CLK_TAP_DELAY	(1 << 6)
#define SPI_CMD2_TX_CLK_TAP_DELAY_MASK	(0x3F << 6)
#define SPI_CMD2_RX_CLK_TAP_DELAY	(1 << 0)
#define SPI_CMD2_RX_CLK_TAP_DELAY_MASK	(0x3F << 0)

/* SPI_TRANS_STATUS */
#define SPI_STATUS_RDY			(1 << 30)
#define SPI_STATUS_SLV_IDLE_COUNT_MASK	0xff
#define SPI_STATUS_SLV_IDLE_COUNT_SHIFT	16
#define SPI_STATUS_BLOCK_COUNT		0xffff
#define SPI_STATUS_BLOCK_COUNT_SHIFT	0

/* SPI_FIFO_STATUS */
#define SPI_FIFO_STATUS_CS_INACTIVE		(1 << 31)
#define SPI_FIFO_STATUS_FRAME_END		(1 << 30)
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK	0x7f
#define SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT 23
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_MASK 0x7f
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY_COUNT_SHIFT 16
#define SPI_FIFO_STATUS_RX_FIFO_FLUSH		(1 << 15)
#define SPI_FIFO_STATUS_TX_FIFO_FLUSH		(1 << 14)
#define SPI_FIFO_STATUS_ERR			(1 << 8)
#define SPI_FIFO_STATUS_TX_FIFO_OVF		(1 << 7)
#define SPI_FIFO_STATUS_TX_FIFO_UNR		(1 << 6)
#define SPI_FIFO_STATUS_RX_FIFO_OVF		(1 << 5)
#define SPI_FIFO_STATUS_RX_FIFO_UNR		(1 << 4)
#define SPI_FIFO_STATUS_TX_FIFO_FULL		(1 << 3)
#define SPI_FIFO_STATUS_TX_FIFO_EMPTY		(1 << 2)
#define SPI_FIFO_STATUS_RX_FIFO_FULL		(1 << 1)
#define SPI_FIFO_STATUS_RX_FIFO_EMPTY		(1 << 0)

/* SPI_DMA_CTL */
#define SPI_DMA_CTL_DMA			(1 << 31)
#define SPI_DMA_CTL_CONT		(1 << 30)
#define SPI_DMA_CTL_IE_RX		(1 << 29)
#define SPI_DMA_CTL_IE_TX		(1 << 28)
#define SPI_DMA_CTL_RX_TRIG_MASK	0x3
#define SPI_DMA_CTL_RX_TRIG_SHIFT	19
#define SPI_DMA_CTL_TX_TRIG_MASK	0x3
#define SPI_DMA_CTL_TX_TRIG_SHIFT	15

/* SPI_DMA_BLK */
#define SPI_DMA_CTL_BLOCK_SIZE_MASK	0xffff
#define SPI_DMA_CTL_BLOCK_SIZE_SHIFT	0
static struct tegra_spi_channel tegra_spi_channels[] = {
	/*
	 * Note: Tegra pinmux must be set up for the corresponding SPI channel
	 * in order for its registers to be accessible. If pinmux has not been
	 * set up, access to the channel's registers will simply hang.
	 *
	 * TODO(dhendrix): Clarify or remove this comment (is clock setup
	 * necessary first, or just pinmux, or both?)
	 */
	{
		.slave = { .bus = 1, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI1_BASE,
		.req_sel = APBDMA_SLAVE_SL2B1,
	},
	{
		.slave = { .bus = 2, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI2_BASE,
		.req_sel = APBDMA_SLAVE_SL2B2,
	},
	{
		.slave = { .bus = 3, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI3_BASE,
		.req_sel = APBDMA_SLAVE_SL2B3,
	},
	{
		.slave = { .bus = 4, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI4_BASE,
		.req_sel = APBDMA_SLAVE_SL2B4,
	},
	{
		.slave = { .bus = 5, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI5_BASE,
		.req_sel = APBDMA_SLAVE_SL2B5,
	},
	{
		.slave = { .bus = 6, },
		.regs = (struct tegra_spi_regs *)TEGRA_SPI6_BASE,
		.req_sel = APBDMA_SLAVE_SL2B6,
	},
};
enum spi_direction {
	SPI_SEND,
	SPI_RECEIVE,
};
struct tegra_spi_channel *tegra_spi_init(unsigned int bus)
{
	int i;
	struct tegra_spi_channel *spi = NULL;

	for (i = 0; i < ARRAY_SIZE(tegra_spi_channels); i++) {
		if (tegra_spi_channels[i].slave.bus == bus) {
			spi = &tegra_spi_channels[i];
			break;
		}
	}
	if (!spi)
		return NULL;

	/* software drives chip-select, set value to high */
	setbits32(&spi->regs->command1,
			SPI_CMD1_CS_SW_HW | SPI_CMD1_CS_SW_VAL);

	/* 8-bit transfers, unpacked mode, most significant bit first */
	clrbits32(&spi->regs->command1,
			SPI_CMD1_BIT_LEN_MASK | SPI_CMD1_PACKED);
	setbits32(&spi->regs->command1, 7 << SPI_CMD1_BIT_LEN_SHIFT);

	return spi;
}
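/*
 * Bus numbers are 1-based, so the channel table is indexed with (bus - 1).
 * Callers are expected to pass a valid bus number; spi_ctrlr_xfer() below
 * ASSERT()s the range before using it.
 */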
static struct tegra_spi_channel * const to_tegra_spi(int bus) {
	return &tegra_spi_channels[bus - 1];
}
static unsigned int tegra_spi_speed(unsigned int bus)
{
	/* FIXME: implement this properly, for now use max value (50MHz) */
	return 50000000;
}
static int spi_ctrlr_claim_bus(const struct spi_slave *slave)
{
	struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
	u32 val;

	tegra_spi_init(slave->bus);

	val = read32(&regs->command1);

	/* select appropriate chip-select line */
	val &= ~(SPI_CMD1_CS_SEL_MASK << SPI_CMD1_CS_SEL_SHIFT);
	val |= (slave->cs << SPI_CMD1_CS_SEL_SHIFT);

	/* drive chip-select with the inverse of the "inactive" value */
	if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
		val &= ~SPI_CMD1_CS_SW_VAL;
	else
		val |= SPI_CMD1_CS_SW_VAL;

	write32(&regs->command1, val);
	return 0;
}
static void spi_ctrlr_release_bus(const struct spi_slave *slave)
{
	struct tegra_spi_regs *regs = to_tegra_spi(slave->bus)->regs;
	u32 val;

	val = read32(&regs->command1);

	if (val & (SPI_CMD1_CS_POL_INACTIVE0 << slave->cs))
		val |= SPI_CMD1_CS_SW_VAL;
	else
		val &= ~SPI_CMD1_CS_SW_VAL;

	write32(&regs->command1, val);
}
static void dump_fifo_status(struct tegra_spi_channel *spi)
{
	u32 status = read32(&spi->regs->fifo_status);

	printk(BIOS_INFO, "Raw FIFO status: 0x%08x\n", status);
	if (status & SPI_FIFO_STATUS_TX_FIFO_OVF)
		printk(BIOS_INFO, "\tTx overflow detected\n");
	if (status & SPI_FIFO_STATUS_TX_FIFO_UNR)
		printk(BIOS_INFO, "\tTx underrun detected\n");
	if (status & SPI_FIFO_STATUS_RX_FIFO_OVF)
		printk(BIOS_INFO, "\tRx overflow detected\n");
	if (status & SPI_FIFO_STATUS_RX_FIFO_UNR)
		printk(BIOS_INFO, "\tRx underrun detected\n");

	printk(BIOS_INFO, "TX_FIFO: 0x%08x, TX_DATA: 0x%08x\n",
		read32(&spi->regs->tx_fifo), read32(&spi->regs->tx_data));
	printk(BIOS_INFO, "RX_FIFO: 0x%08x, RX_DATA: 0x%08x\n",
		read32(&spi->regs->rx_fifo), read32(&spi->regs->rx_data));
}
static void clear_fifo_status(struct tegra_spi_channel *spi)
{
	clrbits32(&spi->regs->fifo_status,
				SPI_FIFO_STATUS_ERR |
				SPI_FIFO_STATUS_TX_FIFO_OVF |
				SPI_FIFO_STATUS_TX_FIFO_UNR |
				SPI_FIFO_STATUS_RX_FIFO_OVF |
				SPI_FIFO_STATUS_RX_FIFO_UNR);
}
static void dump_spi_regs(struct tegra_spi_channel *spi)
{
	printk(BIOS_INFO, "SPI regs:\n"
			"\tdma_blk: 0x%08x\n"
			"\tcommand1: 0x%08x\n"
			"\tdma_ctl: 0x%08x\n"
			"\ttrans_status: 0x%08x\n",
			read32(&spi->regs->dma_blk),
			read32(&spi->regs->command1),
			read32(&spi->regs->dma_ctl),
			read32(&spi->regs->trans_status));
}
static void dump_dma_regs(struct apb_dma_channel *dma)
{
	if (dma == NULL)
		return;

	printk(BIOS_INFO, "DMA regs:\n"
			"\tahb_ptr: 0x%08x\n"
			"\tapb_ptr: 0x%08x\n"
			"\tahb_seq: 0x%08x\n"
			"\tapb_seq: 0x%08x\n"
			"\tcsr: 0x%08x\n"
			"\tcsre: 0x%08x\n"
			"\twcount: 0x%08x\n"
			"\tdma_byte_sta: 0x%08x\n"
			"\tword_transfer: 0x%08x\n",
			read32(&dma->regs->ahb_ptr),
			read32(&dma->regs->apb_ptr),
			read32(&dma->regs->ahb_seq),
			read32(&dma->regs->apb_seq),
			read32(&dma->regs->csr),
			read32(&dma->regs->csre),
			read32(&dma->regs->wcount),
			read32(&dma->regs->dma_byte_sta),
			read32(&dma->regs->word_transfer));
}
static inline unsigned int spi_byte_count(struct tegra_spi_channel *spi)
{
	/* FIXME: Make this take total packet size into account */
	return read32(&spi->regs->trans_status) &
		(SPI_STATUS_BLOCK_COUNT << SPI_STATUS_BLOCK_COUNT_SHIFT);
}
/*
 * This calls udelay() with a calculated value based on the SPI speed and
 * number of bytes remaining to be transferred. It assumes that if the
 * calculated delay period is less than MIN_DELAY_US then it is probably
 * not worth the overhead of yielding.
 */
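/*
 * A worked example at the fixed 50 MHz clock: one byte takes
 * 1000000000 / (50000000 / 8) = 160 ns, so 1000 bytes remaining maps to a
 * 160 us delay, which is below MIN_DELAY_US and therefore skipped; only
 * around 1600 bytes or more remaining actually results in a udelay().
 */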
#define MIN_DELAY_US 250
static void spi_delay(struct tegra_spi_channel *spi,
				unsigned int bytes_remaining)
{
	unsigned int ns_per_byte, delay_us;

	ns_per_byte = 1000000000 / (tegra_spi_speed(spi->slave.bus) / 8);
	delay_us = (ns_per_byte * bytes_remaining) / 1000;

	if (delay_us < MIN_DELAY_US)
		return;

	udelay(delay_us);
}
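/*
 * SPI_DMA_BLK holds the transfer length minus one, so dma_blk + 1 is the
 * number of packets expected. Poll BLOCK_COUNT until it matches, sleeping
 * proportionally to the bytes still outstanding.
 */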
static void tegra_spi_wait(struct tegra_spi_channel *spi)
{
	unsigned int count, dma_blk;

	dma_blk = 1 + (read32(&spi->regs->dma_blk) &
		(SPI_DMA_CTL_BLOCK_SIZE_MASK << SPI_DMA_CTL_BLOCK_SIZE_SHIFT));

	while ((count = spi_byte_count(spi)) != dma_blk)
		spi_delay(spi, dma_blk - count);
}
static int fifo_error(struct tegra_spi_channel *spi)
{
	return read32(&spi->regs->fifo_status) & SPI_FIFO_STATUS_ERR ? 1 : 0;
}
static int tegra_spi_pio_prepare(struct tegra_spi_channel *spi,
			unsigned int bytes, enum spi_direction dir)
{
	u8 *p = spi->out_buf;
	unsigned int todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_FIFO);
	u32 flush_mask, enable_mask;

	if (dir == SPI_SEND) {
		flush_mask = SPI_FIFO_STATUS_TX_FIFO_FLUSH;
		enable_mask = SPI_CMD1_TX_EN;
	} else {
		flush_mask = SPI_FIFO_STATUS_RX_FIFO_FLUSH;
		enable_mask = SPI_CMD1_RX_EN;
	}

	setbits32(&spi->regs->fifo_status, flush_mask);
	while (read32(&spi->regs->fifo_status) & flush_mask)
		;

	setbits32(&spi->regs->command1, enable_mask);

	/* BLOCK_SIZE in SPI_DMA_BLK register applies to both DMA and
	 * PIO transfers */
	write32(&spi->regs->dma_blk, todo - 1);

	if (dir == SPI_SEND) {
		unsigned int to_fifo = bytes;
		while (to_fifo) {
			write32(&spi->regs->tx_fifo, *p);
			p++;
			to_fifo--;
		}
	}

	return todo;
}
static void tegra_spi_pio_start(struct tegra_spi_channel *spi)
{
	setbits32(&spi->regs->trans_status, SPI_STATUS_RDY);
	setbits32(&spi->regs->command1, SPI_CMD1_GO);
	/* Make sure the write to command1 completes. */
	read32(&spi->regs->command1);
}
static inline u32 rx_fifo_count(struct tegra_spi_channel *spi)
{
	return (read32(&spi->regs->fifo_status) >>
		SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_SHIFT) &
		SPI_FIFO_STATUS_RX_FIFO_FULL_COUNT_MASK;
}
static int tegra_spi_pio_finish(struct tegra_spi_channel *spi)
{
	u8 *p = spi->in_buf;
	struct stopwatch sw;

	clrbits32(&spi->regs->command1, SPI_CMD1_RX_EN | SPI_CMD1_TX_EN);

	/*
	 * Allow some time in case the Rx FIFO does not yet have
	 * all packets pushed into it. See chrome-os-partner:24215.
	 */
	stopwatch_init_usecs_expire(&sw, SPI_FIFO_XFER_TIMEOUT_US);
	do {
		if (rx_fifo_count(spi) == spi_byte_count(spi))
			break;
	} while (!stopwatch_expired(&sw));

	while (!(read32(&spi->regs->fifo_status) &
				SPI_FIFO_STATUS_RX_FIFO_EMPTY)) {
		*p = read8(&spi->regs->rx_fifo);
		p++;
	}

	if (fifo_error(spi)) {
		printk(BIOS_ERR, "%s: ERROR:\n", __func__);
		dump_spi_regs(spi);
		dump_fifo_status(spi);
		return -1;
	}

	return 0;
}
static void setup_dma_params(struct tegra_spi_channel *spi,
				struct apb_dma_channel *dma)
{
	/* APB bus width = 8-bits, address wrap for each word */
	clrbits32(&dma->regs->apb_seq,
			APB_BUS_WIDTH_MASK << APB_BUS_WIDTH_SHIFT);
	/* AHB 1 word burst, bus width = 32 bits (fixed in hardware),
	 * no address wrapping */
	clrsetbits32(&dma->regs->ahb_seq,
			(AHB_BURST_MASK << AHB_BURST_SHIFT),
			4 << AHB_BURST_SHIFT);

	/* Set ONCE mode to transfer one "block" at a time (64KB) and enable
	 * flow control. */
	clrbits32(&dma->regs->csr,
			APB_CSR_REQ_SEL_MASK << APB_CSR_REQ_SEL_SHIFT);
	setbits32(&dma->regs->csr, APB_CSR_ONCE | APB_CSR_FLOW |
			(spi->req_sel << APB_CSR_REQ_SEL_SHIFT));
}
static int tegra_spi_dma_prepare(struct tegra_spi_channel *spi,
			unsigned int bytes, enum spi_direction dir)
{
	unsigned int todo, wcount;

	/*
	 * For DMA we need to think of things in terms of word count.
	 * AHB width is fixed at 32-bits. To avoid overrunning
	 * the in/out buffers we must align down. (Note: lowest 2-bits
	 * in WCOUNT register are ignored, and WCOUNT seems to count
	 * words starting at n-1)
	 *
	 * Example: If "bytes" is 7 and we are transferring 1-byte at a time,
	 * WCOUNT should be 4. The remaining 3 bytes must be transferred
	 * using PIO.
	 */
	todo = MIN(bytes, SPI_MAX_TRANSFER_BYTES_DMA - TEGRA_DMA_ALIGN_BYTES);
	todo = ALIGN_DOWN(todo, TEGRA_DMA_ALIGN_BYTES);
	wcount = ALIGN_DOWN(todo - TEGRA_DMA_ALIGN_BYTES, TEGRA_DMA_ALIGN_BYTES);
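	/*
	 * Worked example (assuming TEGRA_DMA_ALIGN_BYTES is 4): bytes = 10
	 * gives todo = 8 and wcount = 4. With the low two bits ignored and
	 * the n-1 encoding noted above, 4 encodes two 32-bit words, i.e. the
	 * 8 bytes moved by DMA; the remaining 2 bytes are left for PIO.
	 */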
	if (dir == SPI_SEND) {
		spi->dma_out = dma_claim();
		if (!spi->dma_out)
			return -1;

		/* ensure bytes to send will be visible to DMA controller */
		dcache_clean_by_mva(spi->out_buf, bytes);

		write32(&spi->dma_out->regs->apb_ptr,
			(u32)&spi->regs->tx_fifo);
		write32(&spi->dma_out->regs->ahb_ptr, (u32)spi->out_buf);
		setbits32(&spi->dma_out->regs->csr, APB_CSR_DIR);
		setup_dma_params(spi, spi->dma_out);
		write32(&spi->dma_out->regs->wcount, wcount);
	} else {
		spi->dma_in = dma_claim();
		if (!spi->dma_in)
			return -1;

		/* avoid data collisions */
		dcache_clean_invalidate_by_mva(spi->in_buf, bytes);

		write32(&spi->dma_in->regs->apb_ptr, (u32)&spi->regs->rx_fifo);
		write32(&spi->dma_in->regs->ahb_ptr, (u32)spi->in_buf);
		clrbits32(&spi->dma_in->regs->csr, APB_CSR_DIR);
		setup_dma_params(spi, spi->dma_in);
		write32(&spi->dma_in->regs->wcount, wcount);
	}

	/* BLOCK_SIZE starts at n-1 */
	write32(&spi->regs->dma_blk, todo - 1);
	return todo;
}
static void tegra_spi_dma_start(struct tegra_spi_channel *spi)
{
	/*
	 * The RDY bit in SPI_TRANS_STATUS needs to be cleared manually
	 * (set bit to clear) between each transaction. Otherwise the next
	 * transaction does not start.
	 */
	setbits32(&spi->regs->trans_status, SPI_STATUS_RDY);

	if (spi->dma_out)
		setbits32(&spi->regs->command1, SPI_CMD1_TX_EN);
	if (spi->dma_in)
		setbits32(&spi->regs->command1, SPI_CMD1_RX_EN);

	/*
	 * To avoid underrun conditions, enable APB DMA before SPI DMA for
	 * Tx and enable SPI DMA before APB DMA for Rx.
	 */
	if (spi->dma_out)
		dma_start(spi->dma_out);
	setbits32(&spi->regs->dma_ctl, SPI_DMA_CTL_DMA);
	if (spi->dma_in)
		dma_start(spi->dma_in);
}
static int tegra_spi_dma_finish(struct tegra_spi_channel *spi)
{
	int ret;
	unsigned int todo;

	if (spi->dma_in) {
		todo = read32(&spi->dma_in->regs->wcount);

		while ((read32(&spi->dma_in->regs->dma_byte_sta) < todo) ||
				dma_busy(spi->dma_in))
			; /* this shouldn't take long, no udelay */
		dma_stop(spi->dma_in);
		clrbits32(&spi->regs->command1, SPI_CMD1_RX_EN);
		dma_release(spi->dma_in);
	}

	if (spi->dma_out) {
		todo = read32(&spi->dma_out->regs->wcount);

		while ((read32(&spi->dma_out->regs->dma_byte_sta) < todo) ||
				dma_busy(spi->dma_out)) {
			spi_delay(spi, todo - spi_byte_count(spi));
		}
		clrbits32(&spi->regs->command1, SPI_CMD1_TX_EN);
		dma_stop(spi->dma_out);
		dma_release(spi->dma_out);
	}

	if (fifo_error(spi)) {
		printk(BIOS_ERR, "%s: ERROR:\n", __func__);
		dump_dma_regs(spi->dma_out);
		dump_dma_regs(spi->dma_in);
		dump_spi_regs(spi);
		dump_fifo_status(spi);
		ret = -1;
		goto done;
	}

	ret = 0;
done:
	spi->dma_in = NULL;
	spi->dma_out = NULL;
	return ret;
}
/*
 * xfer_setup() prepares a transfer. It does sanity checking, alignment, and
 * sets transfer mode used by this channel (if not set already).
 *
 * A few caveats to watch out for:
 * - The number of bytes which can be transferred may be smaller than the
 *   number of bytes the caller specifies. The number of bytes ready for
 *   a transfer will be returned (unless an error occurs).
 *
 * - Only one mode can be used for both RX and TX. The transfer mode of the
 *   SPI channel (spi->xfer_mode) is checked each time this function is
 *   called. If conflicting modes are detected, spi->xfer_mode will be set to
 *   XFER_MODE_NONE and an error will be returned.
 *
 * Returns bytes ready for transfer if successful, <0 to indicate error.
 */
static int xfer_setup(struct tegra_spi_channel *spi, void *buf,
		unsigned int bytes, enum spi_direction dir)
{
	unsigned int line_size = dcache_line_bytes();
	unsigned int align;
	int ret = -1;

	if (!bytes)
		return 0;

	if (dir == SPI_SEND)
		spi->out_buf = buf;
	else if (dir == SPI_RECEIVE)
		spi->in_buf = buf;
	/*
	 * Alignment considerations:
	 * When we enable caching we'll need to clean/invalidate portions of
	 * memory. So we need to be careful about memory alignment. Also, DMA
	 * likes to operate on 4 bytes at a time on the AHB side. So, for
	 * example, if we only want to receive 1 byte, 4 bytes will be
	 * written to memory even if those extra 3 bytes are beyond the length
	 * we want.
	 *
	 * For now we'll use PIO to send/receive unaligned bytes. We may
	 * consider setting aside some space for a kind of bounce buffer to
	 * stay in DMA mode once we have a chance to benchmark the two
	 * approaches.
	 */
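	/*
	 * Worked example (assuming 64-byte cache lines): a 200-byte buffer
	 * starting 24 bytes into a line is carved up over successive calls
	 * as 40 bytes of PIO (up to the line boundary), then 128 bytes of
	 * DMA (the two whole lines), then 32 bytes of PIO for the tail.
	 */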
	if (bytes < line_size) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, bytes, dir);
		}
		goto done;
	}

	/* transfer bytes before the aligned boundary */
	align = line_size - ((uintptr_t)buf % line_size);
	if ((align != 0) && (align != line_size)) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, align, dir);
		}
		goto done;
	}

	/* do aligned DMA transfer */
	align = (((uintptr_t)buf + bytes) % line_size);
	if (bytes - align > 0) {
		unsigned int dma_bytes = bytes - align;

		if (spi->xfer_mode == XFER_MODE_PIO) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_DMA;
			ret = tegra_spi_dma_prepare(spi, dma_bytes, dir);
		}

		goto done;
	}

	/* transfer any remaining unaligned bytes */
	if (align) {
		if (spi->xfer_mode == XFER_MODE_DMA) {
			spi->xfer_mode = XFER_MODE_NONE;
			ret = -1;
		} else {
			spi->xfer_mode = XFER_MODE_PIO;
			ret = tegra_spi_pio_prepare(spi, align, dir);
		}
		goto done;
	}

done:
	return ret;
}
static void xfer_start(struct tegra_spi_channel *spi)
{
	if (spi->xfer_mode == XFER_MODE_DMA)
		tegra_spi_dma_start(spi);
	else
		tegra_spi_pio_start(spi);
}
static void xfer_wait(struct tegra_spi_channel *spi)
{
	tegra_spi_wait(spi);
}
static int xfer_finish(struct tegra_spi_channel *spi)
{
	int ret;

	if (spi->xfer_mode == XFER_MODE_DMA)
		ret = tegra_spi_dma_finish(spi);
	else
		ret = tegra_spi_pio_finish(spi);

	spi->xfer_mode = XFER_MODE_NONE;
	return ret;
}
static int spi_ctrlr_xfer(const struct spi_slave *slave, const void *dout,
			size_t out_bytes, void *din, size_t in_bytes)
{
	struct tegra_spi_channel *spi = to_tegra_spi(slave->bus);
	u8 *out_buf = (u8 *)dout;
	u8 *in_buf = (u8 *)din;
	size_t todo;
	int ret = 0;

	/* tegra bus numbers start at 1 */
	ASSERT(slave->bus >= 1 && slave->bus <= ARRAY_SIZE(tegra_spi_channels));

	while (out_bytes || in_bytes) {
		int x = 0;

		if (out_bytes == 0)
			todo = in_bytes;
		else if (in_bytes == 0)
			todo = out_bytes;
		else
			todo = MIN(out_bytes, in_bytes);
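		/*
		 * xfer_setup() reports a PIO/DMA conflict between the Tx and
		 * Rx halves by returning a negative value and resetting
		 * xfer_mode to XFER_MODE_NONE; in that case force PIO and
		 * retry this chunk so both directions use the same mode.
		 */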
		if (out_bytes) {
			x = xfer_setup(spi, out_buf, todo, SPI_SEND);
			if (x < 0) {
				if (spi->xfer_mode == XFER_MODE_NONE) {
					spi->xfer_mode = XFER_MODE_PIO;
					continue;
				} else {
					ret = -1;
					break;
				}
			}
		}
		if (in_bytes) {
			x = xfer_setup(spi, in_buf, todo, SPI_RECEIVE);
			if (x < 0) {
				if (spi->xfer_mode == XFER_MODE_NONE) {
					spi->xfer_mode = XFER_MODE_PIO;
					continue;
				} else {
					ret = -1;
					break;
				}
			}
		}
		/*
		 * Note: Some devices (such as Chrome EC) are sensitive to
		 * delays, so be careful when adding debug prints not to
		 * cause timeouts between transfers.
		 */
		xfer_start(spi);
		xfer_wait(spi);
		if (xfer_finish(spi)) {
			ret = -1;
			break;
		}

		/* Post-processing. */
		if (out_bytes) {
			out_bytes -= x;
			out_buf += x;
		}
		if (in_bytes) {
			in_bytes -= x;
			in_buf += x;
		}
	}
	if (ret < 0) {
		printk(BIOS_ERR, "%s: Error detected\n", __func__);
		printk(BIOS_ERR, "Transaction size: %zu, bytes remaining: "
				"%zu out / %zu in\n", todo, out_bytes, in_bytes);
		clear_fifo_status(spi);
	}
	return ret;
}
static const struct spi_ctrlr spi_ctrlr = {
	.claim_bus = spi_ctrlr_claim_bus,
	.release_bus = spi_ctrlr_release_bus,
	.xfer = spi_ctrlr_xfer,
	.max_xfer_size = SPI_CTRLR_DEFAULT_MAX_XFER_SIZE,
};
const struct spi_ctrlr_buses spi_ctrlr_bus_map[] = {
	{
		.ctrlr = &spi_ctrlr,
		.bus_start = 1,
		.bus_end = ARRAY_SIZE(tegra_spi_channels)
	},
};