// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Addy Ke <addy.ke@rock-chips.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#define DRIVER_NAME "rockchip-spi"

#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
		writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
		writel_relaxed(readl_relaxed(reg) | (bits), reg)
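/*
 * Example (illustrative): ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
 * BIT(1)) performs a plain read-modify-write of SER with relaxed MMIO
 * accessors, ORing in bit 1; CLR_BITS clears the given bits the same way.
 */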
/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0			0x0000
#define ROCKCHIP_SPI_CTRLR1			0x0004
#define ROCKCHIP_SPI_SSIENR			0x0008
#define ROCKCHIP_SPI_SER			0x000c
#define ROCKCHIP_SPI_BAUDR			0x0010
#define ROCKCHIP_SPI_TXFTLR			0x0014
#define ROCKCHIP_SPI_RXFTLR			0x0018
#define ROCKCHIP_SPI_TXFLR			0x001c
#define ROCKCHIP_SPI_RXFLR			0x0020
#define ROCKCHIP_SPI_SR				0x0024
#define ROCKCHIP_SPI_IPR			0x0028
#define ROCKCHIP_SPI_IMR			0x002c
#define ROCKCHIP_SPI_ISR			0x0030
#define ROCKCHIP_SPI_RISR			0x0034
#define ROCKCHIP_SPI_ICR			0x0038
#define ROCKCHIP_SPI_DMACR			0x003c
#define ROCKCHIP_SPI_DMATDLR			0x0040
#define ROCKCHIP_SPI_DMARDLR			0x0044
#define ROCKCHIP_SPI_VERSION			0x0048
#define ROCKCHIP_SPI_TXDR			0x0400
#define ROCKCHIP_SPI_RXDR			0x0800
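/*
 * TXDR and RXDR are the FIFO data ports: the PIO paths access them with
 * writel_relaxed()/readl_relaxed(), and the DMA channels are pointed at the
 * same offsets relative to the controller's physical base (see the
 * dma_addr_tx/dma_addr_rx setup in probe).
 */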
/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET				0
#define CR0_DFS_4BIT				0x0
#define CR0_DFS_8BIT				0x1
#define CR0_DFS_16BIT				0x2

#define CR0_CFS_OFFSET				2

#define CR0_SCPH_OFFSET				6

#define CR0_SCPOL_OFFSET			7

#define CR0_CSM_OFFSET				8
#define CR0_CSM_KEEP				0x0
/* ss_n stays high for half an sclk_out cycle */
#define CR0_CSM_HALF				0x1
/* ss_n stays high for one sclk_out cycle */
#define CR0_CSM_ONE				0x2

/* ss_n to sclk_out delay */
#define CR0_SSD_OFFSET				10
/*
 * The period between ss_n active and
 * sclk_out active is half an sclk_out cycle
 */
#define CR0_SSD_HALF				0x0
/*
 * The period between ss_n active and
 * sclk_out active is one sclk_out cycle
 */
#define CR0_SSD_ONE				0x1
#define CR0_EM_OFFSET				11
#define CR0_EM_LITTLE				0x0
#define CR0_EM_BIG				0x1

#define CR0_FBM_OFFSET				12
#define CR0_FBM_MSB				0x0
#define CR0_FBM_LSB				0x1

#define CR0_BHT_OFFSET				13
#define CR0_BHT_16BIT				0x0
#define CR0_BHT_8BIT				0x1

#define CR0_RSD_OFFSET				14
#define CR0_RSD_MAX				0x3

#define CR0_FRF_OFFSET				16
#define CR0_FRF_SPI				0x0
#define CR0_FRF_SSP				0x1
#define CR0_FRF_MICROWIRE			0x2

#define CR0_XFM_OFFSET				18
#define CR0_XFM_MASK				(0x03 << CR0_XFM_OFFSET)
#define CR0_XFM_TR				0x0
#define CR0_XFM_TO				0x1
#define CR0_XFM_RO				0x2

#define CR0_OPM_OFFSET				20
#define CR0_OPM_HOST				0x0
#define CR0_OPM_TARGET				0x1

#define CR0_SOI_OFFSET				23

#define CR0_MTM_OFFSET				0x21
/* Bit fields in SER, 2 bits */
#define SER_MASK				0x3

/* Bit fields in BAUDR */
#define BAUDR_SCKDV_MIN				2
#define BAUDR_SCKDV_MAX				65534
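/*
 * These divisor limits bound the usable bit clock: probe derives
 * min_speed_hz = spiclk / BAUDR_SCKDV_MAX and max_speed_hz =
 * min(spiclk / BAUDR_SCKDV_MIN, MAX_SCLK_OUT).
 */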
/* Bit fields in SR, 6 bits */
#define SR_MASK					0x3f
#define SR_BUSY					(1 << 0)
#define SR_TF_FULL				(1 << 1)
#define SR_TF_EMPTY				(1 << 2)
#define SR_RF_EMPTY				(1 << 3)
#define SR_RF_FULL				(1 << 4)
#define SR_TARGET_TX_BUSY			(1 << 5)
/* Bit fields in IPR, IMR, ISR, RISR, 5 bits */
#define INT_MASK				0x1f
#define INT_TF_EMPTY				(1 << 0)
#define INT_TF_OVERFLOW				(1 << 1)
#define INT_RF_UNDERFLOW			(1 << 2)
#define INT_RF_OVERFLOW				(1 << 3)
#define INT_RF_FULL				(1 << 4)
#define INT_CS_INACTIVE				(1 << 6)

/* Bit fields in ICR, 4 bits */
#define ICR_MASK				0x0f
#define ICR_ALL					(1 << 0)
#define ICR_RF_UNDERFLOW			(1 << 1)
#define ICR_RF_OVERFLOW				(1 << 2)
#define ICR_TF_OVERFLOW				(1 << 3)

/* Bit fields in DMACR */
#define RF_DMA_EN				(1 << 0)
#define TF_DMA_EN				(1 << 1)
/* Driver state flags */
#define RXDMA					(1 << 0)
#define TXDMA					(1 << 1)

/* sclk_out: the spi host's internal logic in rk3x can support up to 50 MHz */
#define MAX_SCLK_OUT				50000000U

/*
 * SPI_CTRLR1 is 16 bits, so we should support lengths of 0xffff + 1. However,
 * the controller seems to hang when given 0x10000, so stick with this for now.
 */
#define ROCKCHIP_SPI_MAX_TRANLEN		0xffff

#define ROCKCHIP_SPI_MAX_NATIVE_CS_NUM		2
#define ROCKCHIP_SPI_VER2_TYPE1			0x05EC0002
#define ROCKCHIP_SPI_VER2_TYPE2			0x00110002

#define ROCKCHIP_AUTOSUSPEND_TIMEOUT		2000
struct rockchip_spi {
	struct device *dev;

	struct clk *spiclk;
	struct clk *apb_pclk;

	void __iomem *regs;
	dma_addr_t dma_addr_rx;
	dma_addr_t dma_addr_tx;

	const void *tx;
	void *rx;
	unsigned int tx_left;
	unsigned int rx_left;

	atomic_t state;

	u32 fifo_len;	/* depth of the FIFO buffer */
	u32 freq;	/* frequency of spiclk */

	u8 n_bytes;
	u8 rsd;

	bool target_abort;
	bool cs_inactive;	/* spi target transfer stops when CS goes inactive */
	bool cs_high_supported;	/* native CS supports active-high polarity */

	struct spi_transfer *xfer;	/* Store xfer temporarily */
};
static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
{
	writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
}
static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool target_mode)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5);

	do {
		if (target_mode) {
			if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_TARGET_TX_BUSY) &&
			    !(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
				return;
		} else {
			if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
				return;
		}
	} while (!time_after(jiffies, timeout));

	dev_warn(rs->dev, "spi controller is in busy state!\n");
}
static u32 get_fifo_len(struct rockchip_spi *rs)
{
	u32 ver;

	ver = readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION);

	switch (ver) {
	case ROCKCHIP_SPI_VER2_TYPE1:
	case ROCKCHIP_SPI_VER2_TYPE2:
		return 64;
	default:
		return 32;
	}
}
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct spi_controller *ctlr = spi->controller;
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
	bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;

	if (cs_asserted) {
		/* Keep things powered as long as CS is asserted */
		pm_runtime_get_sync(rs->dev);

		if (spi_get_csgpiod(spi, 0))
			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
		else
			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER,
					      BIT(spi_get_chipselect(spi, 0)));
	} else {
		if (spi_get_csgpiod(spi, 0))
			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
		else
			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER,
					      BIT(spi_get_chipselect(spi, 0)));

		/* Drop reference from when we first asserted CS */
		pm_runtime_put(rs->dev);
	}
}
static void rockchip_spi_handle_err(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

	/*
	 * Stop the running spi transfer;
	 * this also flushes both rx and tx fifos.
	 */
	spi_enable_chip(rs, false);

	/* make sure all interrupts are masked and status cleared */
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);

	if (atomic_read(&rs->state) & TXDMA)
		dmaengine_terminate_async(ctlr->dma_tx);

	if (atomic_read(&rs->state) & RXDMA)
		dmaengine_terminate_async(ctlr->dma_rx);
}
static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
{
	u32 tx_free = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
	u32 words = min(rs->tx_left, tx_free);

	rs->tx_left -= words;
	for (; words; words--) {
		u32 txw;

		if (rs->n_bytes == 1)
			txw = *(u8 *)rs->tx;
		else
			txw = *(u16 *)rs->tx;

		writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
		rs->tx += rs->n_bytes;
	}
}
static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
	u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
	u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;

	/*
	 * The hardware doesn't allow us to change the fifo threshold
	 * level while spi is enabled, so instead make sure to leave
	 * enough words in the rx fifo to get the last interrupt
	 * exactly when all words have been received.
	 */
	if (rx_left) {
		u32 ftl = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFTLR) + 1;

		if (rx_left < ftl) {
			rx_left = ftl;
			words = rs->rx_left - rx_left;
		}
	}

	rs->rx_left = rx_left;
	for (; words; words--) {
		u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);

		if (!rs->rx)
			continue;

		if (rs->n_bytes == 1)
			*(u8 *)rs->rx = (u8)rxw;
		else
			*(u16 *)rs->rx = (u16)rxw;
		rs->rx += rs->n_bytes;
	}
}
static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

	/* When the CS-inactive interrupt comes, abort the target transfer */
	if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_IMR) & INT_CS_INACTIVE) {
		ctlr->target_abort(ctlr);
		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
		writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);

		return IRQ_HANDLED;
	}

	if (rs->tx_left)
		rockchip_spi_pio_writer(rs);

	rockchip_spi_pio_reader(rs);
	if (!rs->rx_left) {
		spi_enable_chip(rs, false);
		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
		writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
		spi_finalize_current_transfer(ctlr);
	}

	return IRQ_HANDLED;
}
static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
				    struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	rs->tx = xfer->tx_buf;
	rs->rx = xfer->rx_buf;
	rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
	rs->rx_left = xfer->len / rs->n_bytes;

	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);

	spi_enable_chip(rs, true);

	if (rs->tx_left)
		rockchip_spi_pio_writer(rs);

	if (rs->cs_inactive)
		writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
	else
		writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);

	/* 1 means the transfer is in progress */
	return 1;
}
static void rockchip_spi_dma_rxcb(void *data)
{
	struct spi_controller *ctlr = data;
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
	int state = atomic_fetch_andnot(RXDMA, &rs->state);

	if (state & TXDMA && !rs->target_abort)
		return;

	if (rs->cs_inactive)
		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);

	spi_enable_chip(rs, false);
	spi_finalize_current_transfer(ctlr);
}
static void rockchip_spi_dma_txcb(void *data)
{
	struct spi_controller *ctlr = data;
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
	int state = atomic_fetch_andnot(TXDMA, &rs->state);

	if (state & RXDMA && !rs->target_abort)
		return;

	/* Wait until the FIFO has completely drained. */
	wait_for_tx_idle(rs, ctlr->target);

	spi_enable_chip(rs, false);
	spi_finalize_current_transfer(ctlr);
}
static u32 rockchip_spi_calc_burst_size(u32 data_len)
{
	u32 i;

	/* burst size: 1, 2, 4, 8 */
	for (i = 1; i < 8; i <<= 1) {
		if (data_len & i)
			break;
	}

	return i;
}
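/*
 * Worked example for rockchip_spi_calc_burst_size(): a word count of 12
 * (binary 1100) yields a burst of 4, the largest of 1/2/4/8 that evenly
 * divides it, while a word count of 16 caps out at the maximum burst of 8.
 */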
static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
		struct spi_controller *ctlr, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *rxdesc, *txdesc;

	atomic_set(&rs->state, 0);

	rs->tx = xfer->tx_buf;
	rs->rx = xfer->rx_buf;

	rxdesc = NULL;
	if (xfer->rx_buf) {
		struct dma_slave_config rxconf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = rs->dma_addr_rx,
			.src_addr_width = rs->n_bytes,
			.src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
		};

		dmaengine_slave_config(ctlr->dma_rx, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				ctlr->dma_rx,
				xfer->rx_sg.sgl, xfer->rx_sg.nents,
				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
		if (!rxdesc)
			return -EINVAL;

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = ctlr;
	}

	txdesc = NULL;
	if (xfer->tx_buf) {
		struct dma_slave_config txconf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = rs->dma_addr_tx,
			.dst_addr_width = rs->n_bytes,
			.dst_maxburst = rs->fifo_len / 4,
		};

		dmaengine_slave_config(ctlr->dma_tx, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				ctlr->dma_tx,
				xfer->tx_sg.sgl, xfer->tx_sg.nents,
				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!txdesc) {
			if (rxdesc)
				dmaengine_terminate_sync(ctlr->dma_rx);
			return -EINVAL;
		}

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = ctlr;
	}

	/* rx must be started before tx: spi clocks data in and out simultaneously */
	if (rxdesc) {
		atomic_or(RXDMA, &rs->state);
		ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
		dma_async_issue_pending(ctlr->dma_rx);
	}

	if (rs->cs_inactive)
		writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);

	spi_enable_chip(rs, true);

	if (txdesc) {
		atomic_or(TXDMA, &rs->state);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(ctlr->dma_tx);
	}

	/* 1 means the transfer is in progress */
	return 1;
}
static int rockchip_spi_config(struct rockchip_spi *rs,
		struct spi_device *spi, struct spi_transfer *xfer,
		bool use_dma, bool target_mode)
{
	u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
		| CR0_BHT_8BIT << CR0_BHT_OFFSET
		| CR0_SSD_ONE << CR0_SSD_OFFSET
		| CR0_EM_BIG << CR0_EM_OFFSET;
	u32 cr1;
	u32 dmacr = 0;

	if (target_mode)
		cr0 |= CR0_OPM_TARGET << CR0_OPM_OFFSET;
	rs->target_abort = false;

	cr0 |= rs->rsd << CR0_RSD_OFFSET;
	cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
	if (spi->mode & SPI_LSB_FIRST)
		cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
	if (spi->mode & SPI_CS_HIGH)
		cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;

	if (xfer->rx_buf && xfer->tx_buf)
		cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
	else if (xfer->rx_buf)
		cr0 |= CR0_XFM_RO << CR0_XFM_OFFSET;
	else
		cr0 |= CR0_XFM_TO << CR0_XFM_OFFSET;

	switch (xfer->bits_per_word) {
	case 4:
		cr0 |= CR0_DFS_4BIT << CR0_DFS_OFFSET;
		cr1 = xfer->len - 1;
		break;
	case 8:
		cr0 |= CR0_DFS_8BIT << CR0_DFS_OFFSET;
		cr1 = xfer->len - 1;
		break;
	case 16:
		cr0 |= CR0_DFS_16BIT << CR0_DFS_OFFSET;
		cr1 = xfer->len / 2 - 1;
		break;
	default:
		/*
		 * We only allow 4, 8 and 16 bit words in
		 * ctlr->bits_per_word_mask, so this shouldn't happen.
		 */
		dev_err(rs->dev, "unknown bits per word: %d\n",
			xfer->bits_per_word);
		return -EINVAL;
	}

	if (use_dma) {
		if (xfer->tx_buf)
			dmacr |= TF_DMA_EN;
		if (xfer->rx_buf)
			dmacr |= RF_DMA_EN;
	}

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
	writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);

	/*
	 * Unfortunately setting the fifo threshold level to generate an
	 * interrupt exactly when the fifo is full doesn't seem to work,
	 * so we need the strict inequality here.
	 */
	if ((xfer->len / rs->n_bytes) < rs->fifo_len)
		writel_relaxed(xfer->len / rs->n_bytes - 1,
			       rs->regs + ROCKCHIP_SPI_RXFTLR);
	else
		writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
		       rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	/*
	 * The hardware only supports an even clock divisor, so round
	 * divisor = spiclk / speed up to the nearest even number so that
	 * the resulting speed is <= the requested speed.
	 */
	writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
		       rs->regs + ROCKCHIP_SPI_BAUDR);

	return 0;
}
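/*
 * Divisor example for rockchip_spi_config(): with spiclk at 100 MHz and a
 * requested 7 MHz transfer, BAUDR is programmed with
 * 2 * DIV_ROUND_UP(100000000, 14000000) = 16, so sclk_out runs at 6.25 MHz,
 * i.e. at or just below the requested rate.
 */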
static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
{
	return ROCKCHIP_SPI_MAX_TRANLEN;
}
static int rockchip_spi_target_abort(struct spi_controller *ctlr)
{
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
	u32 rx_fifo_left;
	struct dma_tx_state state;
	enum dma_status status;

	/* Get current dma rx point */
	if (atomic_read(&rs->state) & RXDMA) {
		dmaengine_pause(ctlr->dma_rx);
		status = dmaengine_tx_status(ctlr->dma_rx, ctlr->dma_rx->cookie, &state);
		if (status == DMA_ERROR) {
			rs->rx = rs->xfer->rx_buf;
			rs->xfer->len = 0;
			rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
			for (; rx_fifo_left; rx_fifo_left--)
				readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
			goto out;
		} else {
			rs->rx += rs->xfer->len - rs->n_bytes * state.residue;
		}
	}

	/* Get the valid data left in the rx fifo and set rs->xfer->len to the real rx size */
	if (rs->rx) {
		rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
		for (; rx_fifo_left; rx_fifo_left--) {
			u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);

			if (rs->n_bytes == 1)
				*(u8 *)rs->rx = (u8)rxw;
			else
				*(u16 *)rs->rx = (u16)rxw;
			rs->rx += rs->n_bytes;
		}
		rs->xfer->len = (unsigned int)(rs->rx - rs->xfer->rx_buf);
	}

out:
	if (atomic_read(&rs->state) & RXDMA)
		dmaengine_terminate_sync(ctlr->dma_rx);
	if (atomic_read(&rs->state) & TXDMA)
		dmaengine_terminate_sync(ctlr->dma_tx);
	atomic_set(&rs->state, 0);
	spi_enable_chip(rs, false);
	rs->target_abort = true;
	spi_finalize_current_transfer(ctlr);

	return 0;
}
static int rockchip_spi_transfer_one(
		struct spi_controller *ctlr,
		struct spi_device *spi,
		struct spi_transfer *xfer)
{
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
	int ret;
	bool use_dma;

	/* Zero length transfers won't trigger an interrupt on completion */
	if (!xfer->len) {
		spi_finalize_current_transfer(ctlr);
		return 1;
	}

	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));

	if (!xfer->tx_buf && !xfer->rx_buf) {
		dev_err(rs->dev, "No buffer for transfer\n");
		return -EINVAL;
	}

	if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
		dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
		return -EINVAL;
	}

	rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
	rs->xfer = xfer;
	use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;

	ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->target);
	if (ret)
		return ret;

	if (use_dma)
		return rockchip_spi_prepare_dma(rs, ctlr, xfer);

	return rockchip_spi_prepare_irq(rs, ctlr, xfer);
}
static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
	unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2;

	/*
	 * If the number of spi words to transfer is less than the fifo
	 * length, we can just fill the fifo and wait for a single irq,
	 * so don't bother setting up dma.
	 */
	return xfer->len / bytes_per_word >= rs->fifo_len;
}
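/*
 * With a 32-word FIFO and 8-bit words, for instance, rockchip_spi_can_dma()
 * only selects DMA for transfers of 32 bytes or more; anything shorter fits
 * into the FIFO and completes from a single interrupt in PIO mode.
 */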
static int rockchip_spi_setup(struct spi_device *spi)
{
	struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
	u32 cr0;

	if (!spi_get_csgpiod(spi, 0) && (spi->mode & SPI_CS_HIGH) && !rs->cs_high_supported) {
		dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
		return -EINVAL;
	}

	pm_runtime_get_sync(rs->dev);

	cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);

	cr0 &= ~(0x3 << CR0_SCPH_OFFSET);
	cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
	if (spi->mode & SPI_CS_HIGH && spi_get_chipselect(spi, 0) <= 1)
		cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET;
	else if (spi_get_chipselect(spi, 0) <= 1)
		cr0 &= ~(BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET);

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);

	pm_runtime_put(rs->dev);

	return 0;
}
static int rockchip_spi_probe(struct platform_device *pdev)
{
	int ret;
	struct rockchip_spi *rs;
	struct spi_controller *ctlr;
	struct resource *mem;
	struct device_node *np = pdev->dev.of_node;
	u32 rsd_nsecs, num_cs;
	bool target_mode;

	target_mode = of_property_read_bool(np, "spi-slave");

	if (target_mode)
		ctlr = spi_alloc_target(&pdev->dev, sizeof(struct rockchip_spi));
	else
		ctlr = spi_alloc_host(&pdev->dev, sizeof(struct rockchip_spi));
	if (!ctlr)
		return -ENOMEM;

	platform_set_drvdata(pdev, ctlr);

	rs = spi_controller_get_devdata(ctlr);

	/* Get basic io resource and map it */
	rs->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
	if (IS_ERR(rs->regs)) {
		ret = PTR_ERR(rs->regs);
		goto err_put_ctlr;
	}

	rs->apb_pclk = devm_clk_get_enabled(&pdev->dev, "apb_pclk");
	if (IS_ERR(rs->apb_pclk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(rs->apb_pclk),
				    "Failed to get apb_pclk\n");
		goto err_put_ctlr;
	}

	rs->spiclk = devm_clk_get_enabled(&pdev->dev, "spiclk");
	if (IS_ERR(rs->spiclk)) {
		ret = dev_err_probe(&pdev->dev, PTR_ERR(rs->spiclk),
				    "Failed to get spi_pclk\n");
		goto err_put_ctlr;
	}

	spi_enable_chip(rs, false);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_put_ctlr;

	ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
					IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
	if (ret)
		goto err_put_ctlr;

	rs->dev = &pdev->dev;
	rs->freq = clk_get_rate(rs->spiclk);

	if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
				  &rsd_nsecs)) {
		/* rx sample delay is expressed in parent clock cycles (max 3) */
		u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8),
					    1000000000 >> 8);
		if (!rsd) {
			dev_warn(rs->dev, "%u Hz are too slow to express %u ns delay\n",
				 rs->freq, rsd_nsecs);
		} else if (rsd > CR0_RSD_MAX) {
			rsd = CR0_RSD_MAX;
			dev_warn(rs->dev,
				 "%u Hz are too fast to express %u ns delay, clamping at %u ns\n",
				 rs->freq, rsd_nsecs,
				 CR0_RSD_MAX * 1000000000U / rs->freq);
		}
		rs->rsd = rsd;
	}

	rs->fifo_len = get_fifo_len(rs);
	if (!rs->fifo_len) {
		ret = dev_err_probe(&pdev->dev, -EINVAL, "Failed to get fifo length\n");
		goto err_put_ctlr;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, ROCKCHIP_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ctlr->auto_runtime_pm = true;
	ctlr->bus_num = pdev->id;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
	if (target_mode) {
		ctlr->mode_bits |= SPI_NO_CS;
		ctlr->target_abort = rockchip_spi_target_abort;
	} else {
		ctlr->flags = SPI_CONTROLLER_GPIO_SS;
		ctlr->max_native_cs = ROCKCHIP_SPI_MAX_NATIVE_CS_NUM;
		/*
		 * rk spi0 has two native cs, spi1..5 one cs only;
		 * if num-cs is missing in the dts, default to 1
		 */
		if (of_property_read_u32(np, "num-cs", &num_cs))
			num_cs = 1;
		ctlr->num_chipselect = num_cs;
		ctlr->use_gpio_descriptors = true;
	}
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
	ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
	ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);

	ctlr->setup = rockchip_spi_setup;
	ctlr->set_cs = rockchip_spi_set_cs;
	ctlr->transfer_one = rockchip_spi_transfer_one;
	ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
	ctlr->handle_err = rockchip_spi_handle_err;

	ctlr->dma_tx = dma_request_chan(rs->dev, "tx");
	if (IS_ERR(ctlr->dma_tx)) {
		/* Check tx to see if we need to defer driver probing */
		ret = dev_warn_probe(rs->dev, PTR_ERR(ctlr->dma_tx),
				     "Failed to request optional TX DMA channel\n");
		if (ret == -EPROBE_DEFER)
			goto err_disable_pm_runtime;
		ctlr->dma_tx = NULL;
	}

	ctlr->dma_rx = dma_request_chan(rs->dev, "rx");
	if (IS_ERR(ctlr->dma_rx)) {
		/* Check rx to see if we need to defer driver probing */
		ret = dev_warn_probe(rs->dev, PTR_ERR(ctlr->dma_rx),
				     "Failed to request optional RX DMA channel\n");
		if (ret == -EPROBE_DEFER)
			goto err_free_dma_tx;
		ctlr->dma_rx = NULL;
	}

	if (ctlr->dma_tx && ctlr->dma_rx) {
		rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
		rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
		ctlr->can_dma = rockchip_spi_can_dma;
	}

	switch (readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION)) {
	case ROCKCHIP_SPI_VER2_TYPE2:
		rs->cs_high_supported = true;
		ctlr->mode_bits |= SPI_CS_HIGH;
		if (ctlr->can_dma && target_mode)
			rs->cs_inactive = true;
		else
			rs->cs_inactive = false;
		break;
	default:
		rs->cs_inactive = false;
		break;
	}

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register controller\n");
		goto err_free_dma_rx;
	}

	return 0;

err_free_dma_rx:
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);
err_free_dma_tx:
	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
err_disable_pm_runtime:
	pm_runtime_disable(&pdev->dev);
err_put_ctlr:
	spi_controller_put(ctlr);

	return ret;
}
static void rockchip_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev));

	pm_runtime_get_sync(&pdev->dev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);

	spi_controller_put(ctlr);
}
#ifdef CONFIG_PM_SLEEP
static int rockchip_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	ret = spi_controller_suspend(ctlr);
	if (ret < 0)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret < 0) {
		spi_controller_resume(ctlr);
		return ret;
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}
static int rockchip_spi_resume(struct device *dev)
{
	int ret;
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_controller_resume(ctlr);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int rockchip_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	return 0;
}

static int rockchip_spi_runtime_resume(struct device *dev)
{
	int ret;
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(rs->spiclk);
	if (ret < 0)
		clk_disable_unprepare(rs->apb_pclk);

	return ret;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops rockchip_spi_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
			   rockchip_spi_runtime_resume, NULL)
};
static const struct of_device_id rockchip_spi_dt_match[] = {
	{ .compatible = "rockchip,px30-spi", },
	{ .compatible = "rockchip,rk3036-spi", },
	{ .compatible = "rockchip,rk3066-spi", },
	{ .compatible = "rockchip,rk3188-spi", },
	{ .compatible = "rockchip,rk3228-spi", },
	{ .compatible = "rockchip,rk3288-spi", },
	{ .compatible = "rockchip,rk3308-spi", },
	{ .compatible = "rockchip,rk3328-spi", },
	{ .compatible = "rockchip,rk3368-spi", },
	{ .compatible = "rockchip,rk3399-spi", },
	{ .compatible = "rockchip,rv1108-spi", },
	{ .compatible = "rockchip,rv1126-spi", },
	{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
static struct platform_driver rockchip_spi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm = &rockchip_spi_pm,
		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
	},
	.probe = rockchip_spi_probe,
	.remove = rockchip_spi_remove,
};

module_platform_driver(rockchip_spi_driver);
MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
MODULE_LICENSE("GPL v2");