// SPDX-License-Identifier: GPL-2.0-only
/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#include <linux/of.h>

#include "spi-dw.h"
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
/* Slave spi_device related */
struct chip_data {
	u32 cr0;
	u32 rx_sample_dly;	/* RX sample delay */
};
#ifdef CONFIG_DEBUG_FS

#define DW_SPI_DBGFS_REG(_name, _off)	\
{					\
	.name = _name,			\
	.offset = _off,			\
}

static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
	DW_SPI_DBGFS_REG("CTRLR0", DW_SPI_CTRLR0),
	DW_SPI_DBGFS_REG("CTRLR1", DW_SPI_CTRLR1),
	DW_SPI_DBGFS_REG("SSIENR", DW_SPI_SSIENR),
	DW_SPI_DBGFS_REG("SER", DW_SPI_SER),
	DW_SPI_DBGFS_REG("BAUDR", DW_SPI_BAUDR),
	DW_SPI_DBGFS_REG("TXFTLR", DW_SPI_TXFTLR),
	DW_SPI_DBGFS_REG("RXFTLR", DW_SPI_RXFTLR),
	DW_SPI_DBGFS_REG("TXFLR", DW_SPI_TXFLR),
	DW_SPI_DBGFS_REG("RXFLR", DW_SPI_RXFLR),
	DW_SPI_DBGFS_REG("SR", DW_SPI_SR),
	DW_SPI_DBGFS_REG("IMR", DW_SPI_IMR),
	DW_SPI_DBGFS_REG("ISR", DW_SPI_ISR),
	DW_SPI_DBGFS_REG("DMACR", DW_SPI_DMACR),
	DW_SPI_DBGFS_REG("DMATDLR", DW_SPI_DMATDLR),
	DW_SPI_DBGFS_REG("DMARDLR", DW_SPI_DMARDLR),
	DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	dws->regset.regs = dw_spi_dbgfs_regs;
	dws->regset.nregs = ARRAY_SIZE(dw_spi_dbgfs_regs);
	dws->regset.base = dws->regs;
	debugfs_create_regset32("registers", 0400, dws->debugfs, &dws->regset);

	return 0;
}
static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}
#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */
void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	bool cs_high = !!(spi->mode & SPI_CS_HIGH);

	/*
	 * DW SPI controller demands any native CS being set in order to
	 * proceed with data transfer. So in order to activate the SPI
	 * communications we must set a corresponding bit in the Slave
	 * Enable register no matter whether the SPI core is configured to
	 * support active-high or active-low CS level.
	 */
	if (cs_high == enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
	else
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_room, rxtx_gap;

	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: using
	 * (dws->fifo_len - rxflr - txflr) as the Tx maximum doesn't cover
	 * the data which has left the Tx FIFO but is still inside the
	 * shift registers and not yet in the Rx FIFO, so the gap is
	 * tracked from the software point of view instead.
	 */
	rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);

	return min3((u32)dws->tx_len, tx_room, rxtx_gap);
}
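
/*
 * Worked example for the rxtx_gap limit above (illustrative numbers): with
 * fifo_len = 16, rx_len = 10 and tx_len = 6, four entries are already in
 * flight towards the Rx FIFO, so at most 16 - (10 - 6) = 12 more entries
 * may be pushed to Tx before the Rx side could overflow.
 */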
/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	return min_t(u32, dws->rx_len, dw_readl(dws, DW_SPI_RXFLR));
}
static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u32 txw = 0;

	while (max--) {
		if (dws->tx) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else if (dws->n_bytes == 2)
				txw = *(u16 *)(dws->tx);
			else
				txw = *(u32 *)(dws->tx);

			dws->tx += dws->n_bytes;
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		--dws->tx_len;
	}
}
static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u32 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		if (dws->rx) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else if (dws->n_bytes == 2)
				*(u16 *)(dws->rx) = rxw;
			else
				*(u32 *)(dws->rx) = rxw;

			dws->rx += dws->n_bytes;
		}
		--dws->rx_len;
	}
}
int dw_spi_check_status(struct dw_spi *dws, bool raw)
{
	u32 irq_status;
	int ret = 0;

	if (raw)
		irq_status = dw_readl(dws, DW_SPI_RISR);
	else
		irq_status = dw_readl(dws, DW_SPI_ISR);

	if (irq_status & SPI_INT_RXOI) {
		dev_err(&dws->master->dev, "RX FIFO overflow detected\n");
		ret = -EIO;
	}

	if (irq_status & SPI_INT_RXUI) {
		dev_err(&dws->master->dev, "RX FIFO underflow detected\n");
		ret = -EIO;
	}

	if (irq_status & SPI_INT_TXOI) {
		dev_err(&dws->master->dev, "TX FIFO overflow detected\n");
		ret = -EIO;
	}

	/* Generically handle the erroneous situation */
	if (ret) {
		spi_reset_chip(dws);
		if (dws->master->cur_msg)
			dws->master->cur_msg->status = ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_check_status);
static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (dw_spi_check_status(dws, false)) {
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}

	/*
	 * Read data from the Rx FIFO every time we've got a chance executing
	 * this method. If there is nothing left to receive, terminate the
	 * procedure. Otherwise adjust the Rx FIFO Threshold level if it's a
	 * final stage of the transfer. By doing so we'll get the next IRQ
	 * right when the leftover incoming data is received.
	 */
	dw_reader(dws);
	if (!dws->rx_len) {
		spi_mask_intr(dws, 0xff);
		spi_finalize_current_transfer(dws->master);
	} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
		dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
	}

	/*
	 * Send data out if Tx FIFO Empty IRQ is received. The IRQ will be
	 * disabled after the data transmission is finished so the TXE IRQ
	 * doesn't flood at the final stage of the transfer.
	 */
	if (irq_status & SPI_INT_TXEI) {
		dw_writer(dws);
		if (!dws->tx_len)
			spi_mask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	if (!master->cur_msg) {
		spi_mask_intr(dws, 0xff);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}
static u32 dw_spi_prepare_cr0(struct dw_spi *dws, struct spi_device *spi)
{
	u32 cr0 = 0;

	if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
		/* CTRLR0[ 5: 4] Frame Format */
		cr0 |= SSI_MOTO_SPI << SPI_FRF_OFFSET;

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 6] Serial Clock Phase
		 * CTRLR0[ 7] Serial Clock Polarity
		 */
		cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET;
		cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET;

		/* CTRLR0[11] Shift Register Loop */
		cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET;
	} else {
		/* CTRLR0[ 7: 6] Frame Format */
		cr0 |= SSI_MOTO_SPI << DWC_SSI_CTRLR0_FRF_OFFSET;

		/*
		 * SPI mode (SCPOL|SCPH)
		 * CTRLR0[ 8] Serial Clock Phase
		 * CTRLR0[ 9] Serial Clock Polarity
		 */
		cr0 |= ((spi->mode & SPI_CPOL) ? 1 : 0) << DWC_SSI_CTRLR0_SCPOL_OFFSET;
		cr0 |= ((spi->mode & SPI_CPHA) ? 1 : 0) << DWC_SSI_CTRLR0_SCPH_OFFSET;

		/* CTRLR0[13] Shift Register Loop */
		cr0 |= ((spi->mode & SPI_LOOP) ? 1 : 0) << DWC_SSI_CTRLR0_SRL_OFFSET;

		if (dws->caps & DW_SPI_CAP_KEEMBAY_MST)
			cr0 |= DWC_SSI_CTRLR0_KEEMBAY_MST;
	}

	return cr0;
}
void dw_spi_update_config(struct dw_spi *dws, struct spi_device *spi,
			  struct dw_spi_cfg *cfg)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	u32 cr0 = chip->cr0;
	u32 speed_hz;
	u16 clk_div;

	/* CTRLR0[ 4/3: 0] or CTRLR0[ 20: 16] Data Frame Size */
	cr0 |= (cfg->dfs - 1) << dws->dfs_offset;

	if (!(dws->caps & DW_SPI_CAP_DWC_SSI))
		/* CTRLR0[ 9:8] Transfer Mode */
		cr0 |= cfg->tmode << SPI_TMOD_OFFSET;
	else
		/* CTRLR0[11:10] Transfer Mode */
		cr0 |= cfg->tmode << DWC_SSI_CTRLR0_TMOD_OFFSET;

	dw_writel(dws, DW_SPI_CTRLR0, cr0);

	if (cfg->tmode == SPI_TMOD_EPROMREAD || cfg->tmode == SPI_TMOD_RO)
		dw_writel(dws, DW_SPI_CTRLR1, cfg->ndf ? cfg->ndf - 1 : 0);

	/* Note DW APB SSI clock divider doesn't support odd numbers */
	clk_div = (DIV_ROUND_UP(dws->max_freq, cfg->freq) + 1) & 0xfffe;
	speed_hz = dws->max_freq / clk_div;

	if (dws->current_freq != speed_hz) {
		spi_set_clk(dws, clk_div);
		dws->current_freq = speed_hz;
	}

	/* Update RX sample delay if required */
	if (dws->cur_rx_sample_dly != chip->rx_sample_dly) {
		dw_writel(dws, DW_SPI_RX_SAMPLE_DLY, chip->rx_sample_dly);
		dws->cur_rx_sample_dly = chip->rx_sample_dly;
	}
}
EXPORT_SYMBOL_GPL(dw_spi_update_config);
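
/*
 * Worked example of the divider arithmetic above (illustrative numbers):
 * with max_freq = 100 MHz and a requested cfg->freq of 30 MHz,
 * DIV_ROUND_UP() yields 4 and (4 + 1) & 0xfffe = 4, giving an effective
 * rate of 25 MHz. Adding one before clearing bit 0 rounds odd dividers up
 * to the next even value, so the programmed rate never exceeds the request.
 */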
static void dw_spi_irq_setup(struct dw_spi *dws)
{
	u16 level;
	u8 imask;

	/*
	 * Originally Tx and Rx data lengths match. The Rx FIFO Threshold level
	 * will be adjusted at the final stage of the IRQ-based SPI transfer
	 * execution so as not to lose the leftover of the incoming data.
	 */
	level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
	dw_writel(dws, DW_SPI_TXFTLR, level);
	dw_writel(dws, DW_SPI_RXFTLR, level - 1);

	dws->transfer_handler = dw_spi_transfer_handler;

	imask = SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI |
		SPI_INT_RXFI;
	spi_umask_intr(dws, imask);
}
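
/*
 * Note on "level - 1" above: assuming the usual DW SSI threshold semantics
 * (the Rx threshold IRQ asserts once the FIFO holds RXFTLR + 1 entries),
 * programming level - 1 raises the interrupt exactly when 'level' entries
 * have been received, which mirrors the Tx threshold of 'level'.
 */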
/*
 * The iterative procedure of the poll-based transfer is simple: write as much
 * as possible to the Tx FIFO, wait until the pending data is ready to be
 * read, read it from the Rx FIFO and check whether the performed procedure
 * has been successful.
 *
 * Note this method, the same way as the IRQ-based transfer, won't work well
 * for the SPI devices connected to the controller with native CS due to the
 * automatic CS assertion/de-assertion.
 */
static int dw_spi_poll_transfer(struct dw_spi *dws,
				struct spi_transfer *transfer)
{
	struct spi_delay delay;
	u16 nbits;
	int ret;

	delay.unit = SPI_DELAY_UNIT_SCK;
	nbits = dws->n_bytes * BITS_PER_BYTE;

	do {
		dw_writer(dws);

		delay.value = nbits * (dws->rx_len - dws->tx_len);
		spi_delay_exec(&delay, transfer);

		dw_reader(dws);

		ret = dw_spi_check_status(dws, true);
		if (ret)
			return ret;
	} while (dws->rx_len);

	return 0;
}
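
/*
 * Illustration of the delay computed above (numbers hypothetical): with
 * 16-bit words (n_bytes = 2, so nbits = 16) and rx_len - tx_len = 4 words
 * still in flight, the loop sleeps for 64 SCK periods before draining the
 * Rx FIFO, i.e. roughly the time the controller needs to clock those
 * words in.
 */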
static int dw_spi_transfer_one(struct spi_controller *master,
			       struct spi_device *spi,
			       struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct dw_spi_cfg cfg = {
		.tmode = SPI_TMOD_TR,
		.dfs = transfer->bits_per_word,
		.freq = transfer->speed_hz,
	};
	int ret;

	dws->dma_mapped = 0;
	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_len = transfer->len / dws->n_bytes;
	dws->rx = transfer->rx_buf;
	dws->rx_len = dws->tx_len;

	/* Ensure the data above is visible for all CPUs */
	smp_wmb();

	spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, spi, &cfg);

	transfer->effective_speed_hz = dws->current_freq;

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret)
			return ret;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped)
		return dws->dma_ops->dma_transfer(dws, transfer);
	else if (dws->irq == IRQ_NOTCONNECTED)
		return dw_spi_poll_transfer(dws, transfer);

	dw_spi_irq_setup(dws);

	return 1;
}
static void dw_spi_handle_err(struct spi_controller *master,
			      struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}
static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	if (op->data.dir == SPI_MEM_DATA_IN)
		op->data.nbytes = clamp_val(op->data.nbytes, 0, SPI_NDF_MASK + 1);

	return 0;
}
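
/*
 * The clamp above caps a single EEPROM-read at SPI_NDF_MASK + 1 data
 * frames: assuming SPI_NDF_MASK is the 16-bit CTRLR1.NDF field mask, that
 * is 65536 frames, since dw_spi_update_config() programs CTRLR1 with
 * 'number of frames - 1'.
 */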
static bool dw_spi_supports_mem_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
	    op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
		return false;

	return spi_mem_default_supports_op(mem, op);
}
static int dw_spi_init_mem_buf(struct dw_spi *dws, const struct spi_mem_op *op)
{
	unsigned int i, j, len;
	u8 *out;

	/*
	 * Calculate the total length of the EEPROM command transfer and
	 * either use the pre-allocated buffer or create a temporary one.
	 */
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		len += op->data.nbytes;

	if (len <= SPI_BUF_SIZE) {
		out = dws->buf;
	} else {
		out = kzalloc(len, GFP_KERNEL);
		if (!out)
			return -ENOMEM;
	}

	/*
	 * Collect the operation code, address and dummy bytes into a single
	 * buffer. If it's a transfer with data to be sent, also copy it into
	 * the same buffer in order to speed the data transmission up.
	 */
	for (i = 0; i < op->cmd.nbytes; ++i)
		out[i] = SPI_GET_BYTE(op->cmd.opcode, op->cmd.nbytes - i - 1);
	for (j = 0; j < op->addr.nbytes; ++i, ++j)
		out[i] = SPI_GET_BYTE(op->addr.val, op->addr.nbytes - j - 1);
	for (j = 0; j < op->dummy.nbytes; ++i, ++j)
		out[i] = 0x0;

	if (op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(&out[i], op->data.buf.out, op->data.nbytes);

	dws->n_bytes = 1;
	dws->tx = out;
	dws->tx_len = len;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dws->rx = op->data.buf.in;
		dws->rx_len = op->data.nbytes;
	} else {
		dws->rx = NULL;
		dws->rx_len = 0;
	}

	return 0;
}
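
/*
 * Resulting buffer layout (hypothetical op): a one-byte 0x03 read opcode
 * with a three-byte address 0x010203 and no dummy cycles is collected as
 * { 0x03, 0x01, 0x02, 0x03 }, i.e. the opcode first, then the address
 * bytes most-significant first, which is what the reverse SPI_GET_BYTE()
 * indexing above produces.
 */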
static void dw_spi_free_mem_buf(struct dw_spi *dws)
{
	if (dws->tx != dws->buf)
		kfree(dws->tx);
}
static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
{
	u32 room, entries, sts;
	unsigned int len;
	u8 *buf;

	/*
	 * At the initial stage we just pre-fill the Tx FIFO with no rush,
	 * since the native CS hasn't been enabled yet and the automatic data
	 * transmission won't start until we do that.
	 */
	len = min(dws->fifo_len, dws->tx_len);
	buf = dws->tx;
	while (len--)
		dw_write_io_reg(dws, DW_SPI_DR, *buf++);

	/*
	 * After setting any bit in the SER register the transmission will
	 * start automatically. We have to keep up with that procedure
	 * otherwise the CS de-assertion will happen whereupon the memory
	 * operation will be pre-terminated.
	 */
	len = dws->tx_len - ((void *)buf - dws->tx);
	dw_spi_set_cs(spi, false);
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_TXFLR);
		if (!entries) {
			dev_err(&dws->master->dev, "CS de-assertion on Tx\n");
			return -EIO;
		}
		room = min(dws->fifo_len - entries, len);
		for (; room; --room, --len)
			dw_write_io_reg(dws, DW_SPI_DR, *buf++);
	}

	/*
	 * Data fetching will start automatically if the EEPROM-read mode is
	 * activated. We have to keep up with the incoming data pace to
	 * prevent the Rx FIFO overflow causing the inbound data loss.
	 */
	len = dws->rx_len;
	buf = dws->rx;
	while (len) {
		entries = readl_relaxed(dws->regs + DW_SPI_RXFLR);
		if (!entries) {
			sts = readl_relaxed(dws->regs + DW_SPI_RISR);
			if (sts & SPI_INT_RXOI) {
				dev_err(&dws->master->dev, "FIFO overflow on Rx\n");
				return -EIO;
			}
			continue;
		}
		entries = min(entries, len);
		for (; entries; --entries, --len)
			*buf++ = dw_read_io_reg(dws, DW_SPI_DR);
	}

	return 0;
}
static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
	return dw_readl(dws, DW_SPI_SR) & SR_BUSY;
}
static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
	int retry = SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	ns = NSEC_PER_SEC / dws->current_freq * nents;
	ns *= dws->n_bytes * BITS_PER_BYTE;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_ctlr_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->master->dev, "Mem op hung up\n");
		return -EIO;
	}

	return 0;
}
static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
{
	spi_enable_chip(dws, 0);
	dw_spi_set_cs(spi, true);
	spi_enable_chip(dws, 1);
}
/*
 * The SPI memory operation implementation below is the best choice for the
 * devices, which are selected by the native chip-select lane. It's
 * specifically developed to work around the problem with automatic chip-select
 * lane toggle when there is no data in the Tx FIFO buffer. Luckily the current
 * SPI-mem core calls exec_op() callback only if the GPIO-based CS is
 * unavailable.
 */
static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct dw_spi *dws = spi_controller_get_devdata(mem->spi->controller);
	struct dw_spi_cfg cfg;
	unsigned long flags;
	int ret;

	/*
	 * Collect the outbound data into a single buffer to speed the
	 * transmission up at least on the initial stage.
	 */
	ret = dw_spi_init_mem_buf(dws, op);
	if (ret)
		return ret;

	/*
	 * DW SPI EEPROM-read mode is required only for the SPI memory Data-IN
	 * operation. Transmit-only mode is suitable for the rest of them.
	 */
	cfg.dfs = 8;
	cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq);
	if (op->data.dir == SPI_MEM_DATA_IN) {
		cfg.tmode = SPI_TMOD_EPROMREAD;
		cfg.ndf = op->data.nbytes;
	} else {
		cfg.tmode = SPI_TMOD_TO;
	}

	spi_enable_chip(dws, 0);

	dw_spi_update_config(dws, mem->spi, &cfg);

	spi_mask_intr(dws, 0xff);

	spi_enable_chip(dws, 1);

	/*
	 * DW APB SSI controller has very nasty peculiarities. First, in its
	 * original form (without any vendor-specific modifications) it doesn't
	 * provide a direct way to set and clear the native chip-select signal.
	 * Instead the controller asserts the CS lane if the Tx FIFO isn't
	 * empty and a transmission is going on, and automatically de-asserts
	 * it back to the high level if the Tx FIFO doesn't have anything to be
	 * pushed out. Due to that, multi-tasking or heavy IRQ activity might
	 * be fatal, since preemption of the transfer procedure may cause the
	 * Tx FIFO to get empty and a sudden CS de-assertion, which in the
	 * middle of the transfer will most likely cause data loss. Secondly,
	 * the EEPROM-read and Read-only DW SPI transfer modes imply the
	 * incoming data being automatically pulled into the Rx FIFO. So if the
	 * driver software is late in fetching the data from the FIFO before it
	 * overflows, the new incoming data will be lost. In order to make sure
	 * the executed memory operations are CS-atomic and to prevent the Rx
	 * FIFO overflow, we have to disable the local interrupts so as to
	 * block any preemption during the subsequent IO operations.
	 *
	 * Note. In some circumstances disabling IRQs may not help to prevent
	 * the problems described above. The CS de-assertion and Rx FIFO
	 * overflow may still happen due to a relatively slow system bus or a
	 * CPU not working fast enough, so the write-then-read algorithm
	 * implemented here just won't keep up with the SPI bus data transfer.
	 * Such a situation is highly platform specific and is supposed to be
	 * fixed by manually restricting the SPI bus frequency using the
	 * dws->max_mem_freq parameter.
	 */
	local_irq_save(flags);
	preempt_disable();

	ret = dw_spi_write_then_read(dws, mem->spi);

	local_irq_restore(flags);
	preempt_enable();

	/*
	 * Wait for the operation to finish and check the controller status
	 * only if no run-time error has been detected so far: waiting is
	 * pointless after an error, and re-checking the status would just
	 * print an extra error message, since any HW error flag being set
	 * would be due to the error already detected on the data transfer.
	 */
	if (!ret) {
		ret = dw_spi_wait_mem_op_done(dws);
		if (!ret)
			ret = dw_spi_check_status(dws, true);
	}

	dw_spi_stop_mem_op(dws, mem->spi);

	dw_spi_free_mem_buf(dws);

	return ret;
}
/*
 * Initialize the default memory operations if a glue layer hasn't specified
 * custom ones. Direct mapping operations will be preserved anyway since the DW
 * SPI controller doesn't have an embedded dirmap interface. Note the memory
 * operations implemented in this driver are the best choice only for the DW
 * APB SSI controller with standard native CS functionality. If a hardware
 * vendor has fixed the automatic CS assertion/de-assertion peculiarity, then
 * it will be safer to use the normal SPI-messages-based transfers
 * implementation.
 */
static void dw_spi_init_mem_ops(struct dw_spi *dws)
{
	if (!dws->mem_ops.exec_op && !(dws->caps & DW_SPI_CAP_CS_OVERRIDE) &&
	    !dws->set_cs) {
		dws->mem_ops.adjust_op_size = dw_spi_adjust_mem_op_size;
		dws->mem_ops.supports_op = dw_spi_supports_mem_op;
		dws->mem_ops.exec_op = dw_spi_exec_mem_op;
		if (!dws->max_mem_freq)
			dws->max_mem_freq = dws->max_freq;
	}
}
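
/*
 * A glue layer with working native-CS control can bypass these defaults by
 * filling dws->mem_ops before calling dw_spi_add_host(). A minimal sketch,
 * with hypothetical callback names:
 *
 *	dws->mem_ops.exec_op = my_vendor_exec_op;
 *	dws->max_mem_freq = 50000000;
 *	ret = dw_spi_add_host(dev, dws);
 */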
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		u32 rx_sample_dly_ns;

		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
		/* Get specific / default rx-sample-delay */
		if (device_property_read_u32(&spi->dev,
					     "rx-sample-delay-ns",
					     &rx_sample_dly_ns) != 0)
			/* Use default controller value */
			rx_sample_dly_ns = dws->def_rx_sample_dly_ns;
		chip->rx_sample_dly = DIV_ROUND_CLOSEST(rx_sample_dly_ns,
							NSEC_PER_SEC /
							dws->max_freq);
	}

	/*
	 * Update CR0 data each time the setup callback is invoked since
	 * the device parameters could have been changed, for instance, by
	 * the MMC SPI driver or something else.
	 */
	chip->cr0 = dw_spi_prepare_cr0(dws, spi);

	return 0;
}
static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if not set by interface driver,
	 * the depth could be from 2 to 256 from HW spec
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFTLR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFTLR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFTLR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/*
	 * Detect CTRLR0.DFS field size and offset by testing the lowest bits
	 * writability. Note DWC SSI controller also has the extended DFS, but
	 * it's always placed at the lowest bits of CTRLR0, so the test can be
	 * skipped for it.
	 */
	if (!(dws->caps & DW_SPI_CAP_DWC_SSI)) {
		u32 cr0, tmp = dw_readl(dws, DW_SPI_CTRLR0);

		spi_enable_chip(dws, 0);
		dw_writel(dws, DW_SPI_CTRLR0, 0xffffffff);
		cr0 = dw_readl(dws, DW_SPI_CTRLR0);
		dw_writel(dws, DW_SPI_CTRLR0, tmp);
		spi_enable_chip(dws, 1);

		if (!(cr0 & SPI_DFS_MASK)) {
			dws->caps |= DW_SPI_CAP_DFS32;
			dws->dfs_offset = SPI_DFS32_OFFSET;
			dev_dbg(dev, "Detected 32-bits max data frame size\n");
		}
	} else {
		dws->caps |= DW_SPI_CAP_DFS32;
	}

	/* Enable HW fixup for explicit CS deselect for Amazon's alpine chip */
	if (dws->caps & DW_SPI_CAP_CS_OVERRIDE)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
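
/*
 * Why the FIFO probe above works: TXFTLR only latches values below the
 * FIFO depth, so the written value keeps reading back unchanged until
 * 'fifo' reaches the depth, at which point the readback differs and the
 * loop stops with 'fifo' equal to the depth (e.g. a 16-entry FIFO first
 * fails at the write of 16).
 */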
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	if (!dws)
		return -EINVAL;

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(master, dws);

	/* Basic HW init */
	spi_hw_init(dev, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
	if (ret < 0 && ret != -ENOTCONN) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	dw_spi_init_mem_ops(dws);

	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	if (dws->caps & DW_SPI_CAP_DFS32)
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	else
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	if (dws->set_cs)
		master->set_cs = dws->set_cs;
	else
		master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	if (dws->mem_ops.exec_op)
		master->mem_ops = &dws->mem_ops;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;
	master->dev.fwnode = dev->fwnode;
	master->flags = SPI_MASTER_GPIO_SS;
	master->auto_runtime_pm = true;

	/* Get default rx sample delay */
	device_property_read_u32(dev, "rx-sample-delay-ns",
				 &dws->def_rx_sample_dly_ns);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dev, dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
		} else {
			master->can_dma = dws->dma_ops->can_dma;
			master->flags |= SPI_CONTROLLER_MUST_TX;
		}
	}

	ret = spi_register_controller(master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);
void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	spi_unregister_controller(dws->master);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

	spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
int dw_spi_resume_host(struct dw_spi *dws)
{
	spi_hw_init(&dws->master->dev, dws);
	return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");