/*
 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Addy Ke <addy.ke@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
16 #include <linux/clk.h>
17 #include <linux/dmaengine.h>
18 #include <linux/module.h>
20 #include <linux/pinctrl/consumer.h>
21 #include <linux/platform_device.h>
22 #include <linux/spi/spi.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/scatterlist.h>
#define DRIVER_NAME "rockchip-spi"

/* Read-modify-write helpers for register bit manipulation */
#define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
		writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
#define ROCKCHIP_SPI_SET_BITS(reg, bits) \
		writel_relaxed(readl_relaxed(reg) | (bits), reg)

/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0			0x0000
#define ROCKCHIP_SPI_CTRLR1			0x0004
#define ROCKCHIP_SPI_SSIENR			0x0008
#define ROCKCHIP_SPI_SER			0x000c
#define ROCKCHIP_SPI_BAUDR			0x0010
#define ROCKCHIP_SPI_TXFTLR			0x0014
#define ROCKCHIP_SPI_RXFTLR			0x0018
#define ROCKCHIP_SPI_TXFLR			0x001c
#define ROCKCHIP_SPI_RXFLR			0x0020
#define ROCKCHIP_SPI_SR				0x0024
#define ROCKCHIP_SPI_IPR			0x0028
#define ROCKCHIP_SPI_IMR			0x002c
#define ROCKCHIP_SPI_ISR			0x0030
#define ROCKCHIP_SPI_RISR			0x0034
#define ROCKCHIP_SPI_ICR			0x0038
#define ROCKCHIP_SPI_DMACR			0x003c
#define ROCKCHIP_SPI_DMATDLR			0x0040
#define ROCKCHIP_SPI_DMARDLR			0x0044
#define ROCKCHIP_SPI_TXDR			0x0400
#define ROCKCHIP_SPI_RXDR			0x0800

/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET				0

#define CR0_CFS_OFFSET				2

#define CR0_SCPH_OFFSET				6

#define CR0_SCPOL_OFFSET			7

#define CR0_CSM_OFFSET				8
#define CR0_CSM_KEEP				0x0
/* ss_n be high for half sclk_out cycles */
#define CR0_CSM_HALF				0x1
/* ss_n be high for one sclk_out cycle */
#define CR0_CSM_ONE				0x2

/* ss_n to sclk_out delay */
#define CR0_SSD_OFFSET				10
/*
 * The period between ss_n active and
 * sclk_out active is half sclk_out cycles
 */
#define CR0_SSD_HALF				0x0
/*
 * The period between ss_n active and
 * sclk_out active is one sclk_out cycle
 */
#define CR0_SSD_ONE				0x1

#define CR0_EM_OFFSET				11
#define CR0_EM_LITTLE				0x0
#define CR0_EM_BIG				0x1

#define CR0_FBM_OFFSET				12
#define CR0_FBM_MSB				0x0
#define CR0_FBM_LSB				0x1

#define CR0_BHT_OFFSET				13
#define CR0_BHT_16BIT				0x0
#define CR0_BHT_8BIT				0x1

#define CR0_RSD_OFFSET				14

#define CR0_FRF_OFFSET				16
#define CR0_FRF_SPI				0x0
#define CR0_FRF_SSP				0x1
#define CR0_FRF_MICROWIRE			0x2

#define CR0_XFM_OFFSET				18
/* was "(0x03 << SPI_XFM_OFFSET)" — SPI_XFM_OFFSET does not exist */
#define CR0_XFM_MASK				(0x03 << CR0_XFM_OFFSET)
#define CR0_XFM_TR				0x0
#define CR0_XFM_TO				0x1
#define CR0_XFM_RO				0x2

#define CR0_OPM_OFFSET				20
#define CR0_OPM_MASTER				0x0
#define CR0_OPM_SLAVE				0x1

/* NOTE(review): 0x21 (=33) exceeds a 32-bit register; looks like a typo
 * for bit 21, but the macro is unused — confirm before changing. */
#define CR0_MTM_OFFSET				0x21

/* Bit fields in SER, 2bit */
#define SER_MASK				0x3

/* Bit fields in SR, 5bit */
#define SR_MASK					0x1f
#define SR_BUSY					(1 << 0)
#define SR_TF_FULL				(1 << 1)
#define SR_TF_EMPTY				(1 << 2)
#define SR_RF_EMPTY				(1 << 3)
#define SR_RF_FULL				(1 << 4)

/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
#define INT_MASK				0x1f
#define INT_TF_EMPTY				(1 << 0)
#define INT_TF_OVERFLOW				(1 << 1)
#define INT_RF_UNDERFLOW			(1 << 2)
#define INT_RF_OVERFLOW				(1 << 3)
#define INT_RF_FULL				(1 << 4)

/* Bit fields in ICR, 4bit */
#define ICR_MASK				0x0f
#define ICR_ALL					(1 << 0)
#define ICR_RF_UNDERFLOW			(1 << 1)
#define ICR_RF_OVERFLOW				(1 << 2)
#define ICR_TF_OVERFLOW				(1 << 3)

/* Bit fields in DMACR */
#define RF_DMA_EN				(1 << 0)
#define TF_DMA_EN				(1 << 1)

/* Driver state flags (rs->state), protected by rs->lock */
#define RXBUSY					(1 << 0)
#define TXBUSY					(1 << 1)

/* sclk_out: spi master internal logic in rk3x can support 50Mhz */
#define MAX_SCLK_OUT				50000000

/*
 * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
 * the controller seems to hang when given 0x10000, so stick with this for now.
 */
#define ROCKCHIP_SPI_MAX_TRANLEN		0xffff

#define ROCKCHIP_SPI_MAX_CS_NUM			2
159 enum rockchip_ssi_type
{
165 struct rockchip_spi_dma_data
{
167 enum dma_transfer_direction direction
;
171 struct rockchip_spi
{
173 struct spi_master
*master
;
176 struct clk
*apb_pclk
;
179 /*depth of the FIFO buffer */
181 /* max bus freq supported */
183 /* supported slave numbers */
184 enum rockchip_ssi_type type
;
203 bool cs_asserted
[ROCKCHIP_SPI_MAX_CS_NUM
];
206 struct sg_table tx_sg
;
207 struct sg_table rx_sg
;
208 struct rockchip_spi_dma_data dma_rx
;
209 struct rockchip_spi_dma_data dma_tx
;
210 struct dma_slave_caps dma_caps
;
213 static inline void spi_enable_chip(struct rockchip_spi
*rs
, int enable
)
215 writel_relaxed((enable
? 1 : 0), rs
->regs
+ ROCKCHIP_SPI_SSIENR
);
218 static inline void spi_set_clk(struct rockchip_spi
*rs
, u16 div
)
220 writel_relaxed(div
, rs
->regs
+ ROCKCHIP_SPI_BAUDR
);
223 static inline void flush_fifo(struct rockchip_spi
*rs
)
225 while (readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_RXFLR
))
226 readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_RXDR
);
229 static inline void wait_for_idle(struct rockchip_spi
*rs
)
231 unsigned long timeout
= jiffies
+ msecs_to_jiffies(5);
234 if (!(readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_SR
) & SR_BUSY
))
236 } while (!time_after(jiffies
, timeout
));
238 dev_warn(rs
->dev
, "spi controller is in busy state!\n");
241 static u32
get_fifo_len(struct rockchip_spi
*rs
)
245 for (fifo
= 2; fifo
< 32; fifo
++) {
246 writel_relaxed(fifo
, rs
->regs
+ ROCKCHIP_SPI_TXFTLR
);
247 if (fifo
!= readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_TXFTLR
))
251 writel_relaxed(0, rs
->regs
+ ROCKCHIP_SPI_TXFTLR
);
253 return (fifo
== 31) ? 0 : fifo
;
256 static inline u32
tx_max(struct rockchip_spi
*rs
)
258 u32 tx_left
, tx_room
;
260 tx_left
= (rs
->tx_end
- rs
->tx
) / rs
->n_bytes
;
261 tx_room
= rs
->fifo_len
- readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_TXFLR
);
263 return min(tx_left
, tx_room
);
266 static inline u32
rx_max(struct rockchip_spi
*rs
)
268 u32 rx_left
= (rs
->rx_end
- rs
->rx
) / rs
->n_bytes
;
269 u32 rx_room
= (u32
)readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_RXFLR
);
271 return min(rx_left
, rx_room
);
274 static void rockchip_spi_set_cs(struct spi_device
*spi
, bool enable
)
276 struct spi_master
*master
= spi
->master
;
277 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
278 bool cs_asserted
= !enable
;
280 /* Return immediately for no-op */
281 if (cs_asserted
== rs
->cs_asserted
[spi
->chip_select
])
285 /* Keep things powered as long as CS is asserted */
286 pm_runtime_get_sync(rs
->dev
);
288 ROCKCHIP_SPI_SET_BITS(rs
->regs
+ ROCKCHIP_SPI_SER
,
289 BIT(spi
->chip_select
));
291 ROCKCHIP_SPI_CLR_BITS(rs
->regs
+ ROCKCHIP_SPI_SER
,
292 BIT(spi
->chip_select
));
294 /* Drop reference from when we first asserted CS */
295 pm_runtime_put(rs
->dev
);
298 rs
->cs_asserted
[spi
->chip_select
] = cs_asserted
;
301 static int rockchip_spi_prepare_message(struct spi_master
*master
,
302 struct spi_message
*msg
)
304 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
305 struct spi_device
*spi
= msg
->spi
;
307 rs
->mode
= spi
->mode
;
312 static void rockchip_spi_handle_err(struct spi_master
*master
,
313 struct spi_message
*msg
)
316 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
318 spin_lock_irqsave(&rs
->lock
, flags
);
321 * For DMA mode, we need terminate DMA channel and flush
322 * fifo for the next transfer if DMA thansfer timeout.
323 * handle_err() was called by core if transfer failed.
324 * Maybe it is reasonable for error handling here.
327 if (rs
->state
& RXBUSY
) {
328 dmaengine_terminate_async(rs
->dma_rx
.ch
);
332 if (rs
->state
& TXBUSY
)
333 dmaengine_terminate_async(rs
->dma_tx
.ch
);
336 spin_unlock_irqrestore(&rs
->lock
, flags
);
/* Disable the controller once a message has been fully processed */
static int rockchip_spi_unprepare_message(struct spi_master *master,
					  struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spi_enable_chip(rs, 0);

	return 0;
}
349 static void rockchip_spi_pio_writer(struct rockchip_spi
*rs
)
351 u32 max
= tx_max(rs
);
355 if (rs
->n_bytes
== 1)
356 txw
= *(u8
*)(rs
->tx
);
358 txw
= *(u16
*)(rs
->tx
);
360 writel_relaxed(txw
, rs
->regs
+ ROCKCHIP_SPI_TXDR
);
361 rs
->tx
+= rs
->n_bytes
;
365 static void rockchip_spi_pio_reader(struct rockchip_spi
*rs
)
367 u32 max
= rx_max(rs
);
371 rxw
= readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_RXDR
);
372 if (rs
->n_bytes
== 1)
373 *(u8
*)(rs
->rx
) = (u8
)rxw
;
375 *(u16
*)(rs
->rx
) = (u16
)rxw
;
376 rs
->rx
+= rs
->n_bytes
;
380 static int rockchip_spi_pio_transfer(struct rockchip_spi
*rs
)
386 remain
= rs
->tx_end
- rs
->tx
;
387 rockchip_spi_pio_writer(rs
);
391 remain
= rs
->rx_end
- rs
->rx
;
392 rockchip_spi_pio_reader(rs
);
398 /* If tx, wait until the FIFO data completely. */
402 spi_enable_chip(rs
, 0);
407 static void rockchip_spi_dma_rxcb(void *data
)
410 struct rockchip_spi
*rs
= data
;
412 spin_lock_irqsave(&rs
->lock
, flags
);
414 rs
->state
&= ~RXBUSY
;
415 if (!(rs
->state
& TXBUSY
)) {
416 spi_enable_chip(rs
, 0);
417 spi_finalize_current_transfer(rs
->master
);
420 spin_unlock_irqrestore(&rs
->lock
, flags
);
423 static void rockchip_spi_dma_txcb(void *data
)
426 struct rockchip_spi
*rs
= data
;
428 /* Wait until the FIFO data completely. */
431 spin_lock_irqsave(&rs
->lock
, flags
);
433 rs
->state
&= ~TXBUSY
;
434 if (!(rs
->state
& RXBUSY
)) {
435 spi_enable_chip(rs
, 0);
436 spi_finalize_current_transfer(rs
->master
);
439 spin_unlock_irqrestore(&rs
->lock
, flags
);
442 static int rockchip_spi_prepare_dma(struct rockchip_spi
*rs
)
445 struct dma_slave_config rxconf
, txconf
;
446 struct dma_async_tx_descriptor
*rxdesc
, *txdesc
;
448 memset(&rxconf
, 0, sizeof(rxconf
));
449 memset(&txconf
, 0, sizeof(txconf
));
451 spin_lock_irqsave(&rs
->lock
, flags
);
452 rs
->state
&= ~RXBUSY
;
453 rs
->state
&= ~TXBUSY
;
454 spin_unlock_irqrestore(&rs
->lock
, flags
);
458 rxconf
.direction
= rs
->dma_rx
.direction
;
459 rxconf
.src_addr
= rs
->dma_rx
.addr
;
460 rxconf
.src_addr_width
= rs
->n_bytes
;
461 if (rs
->dma_caps
.max_burst
> 4)
462 rxconf
.src_maxburst
= 4;
464 rxconf
.src_maxburst
= 1;
465 dmaengine_slave_config(rs
->dma_rx
.ch
, &rxconf
);
467 rxdesc
= dmaengine_prep_slave_sg(
469 rs
->rx_sg
.sgl
, rs
->rx_sg
.nents
,
470 rs
->dma_rx
.direction
, DMA_PREP_INTERRUPT
);
474 rxdesc
->callback
= rockchip_spi_dma_rxcb
;
475 rxdesc
->callback_param
= rs
;
480 txconf
.direction
= rs
->dma_tx
.direction
;
481 txconf
.dst_addr
= rs
->dma_tx
.addr
;
482 txconf
.dst_addr_width
= rs
->n_bytes
;
483 if (rs
->dma_caps
.max_burst
> 4)
484 txconf
.dst_maxburst
= 4;
486 txconf
.dst_maxburst
= 1;
487 dmaengine_slave_config(rs
->dma_tx
.ch
, &txconf
);
489 txdesc
= dmaengine_prep_slave_sg(
491 rs
->tx_sg
.sgl
, rs
->tx_sg
.nents
,
492 rs
->dma_tx
.direction
, DMA_PREP_INTERRUPT
);
495 dmaengine_terminate_sync(rs
->dma_rx
.ch
);
499 txdesc
->callback
= rockchip_spi_dma_txcb
;
500 txdesc
->callback_param
= rs
;
503 /* rx must be started before tx due to spi instinct */
505 spin_lock_irqsave(&rs
->lock
, flags
);
507 spin_unlock_irqrestore(&rs
->lock
, flags
);
508 dmaengine_submit(rxdesc
);
509 dma_async_issue_pending(rs
->dma_rx
.ch
);
513 spin_lock_irqsave(&rs
->lock
, flags
);
515 spin_unlock_irqrestore(&rs
->lock
, flags
);
516 dmaengine_submit(txdesc
);
517 dma_async_issue_pending(rs
->dma_tx
.ch
);
523 static void rockchip_spi_config(struct rockchip_spi
*rs
)
529 u32 cr0
= (CR0_BHT_8BIT
<< CR0_BHT_OFFSET
)
530 | (CR0_SSD_ONE
<< CR0_SSD_OFFSET
)
531 | (CR0_EM_BIG
<< CR0_EM_OFFSET
);
533 cr0
|= (rs
->n_bytes
<< CR0_DFS_OFFSET
);
534 cr0
|= ((rs
->mode
& 0x3) << CR0_SCPH_OFFSET
);
535 cr0
|= (rs
->tmode
<< CR0_XFM_OFFSET
);
536 cr0
|= (rs
->type
<< CR0_FRF_OFFSET
);
545 if (WARN_ON(rs
->speed
> MAX_SCLK_OUT
))
546 rs
->speed
= MAX_SCLK_OUT
;
548 /* the minimum divisor is 2 */
549 if (rs
->max_freq
< 2 * rs
->speed
) {
550 clk_set_rate(rs
->spiclk
, 2 * rs
->speed
);
551 rs
->max_freq
= clk_get_rate(rs
->spiclk
);
554 /* div doesn't support odd number */
555 div
= DIV_ROUND_UP(rs
->max_freq
, rs
->speed
);
556 div
= (div
+ 1) & 0xfffe;
558 /* Rx sample delay is expressed in parent clock cycles (max 3) */
559 rsd
= DIV_ROUND_CLOSEST(rs
->rsd_nsecs
* (rs
->max_freq
>> 8),
561 if (!rsd
&& rs
->rsd_nsecs
) {
562 pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
563 rs
->max_freq
, rs
->rsd_nsecs
);
564 } else if (rsd
> 3) {
566 pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
567 rs
->max_freq
, rs
->rsd_nsecs
,
568 rsd
* 1000000000U / rs
->max_freq
);
570 cr0
|= rsd
<< CR0_RSD_OFFSET
;
572 writel_relaxed(cr0
, rs
->regs
+ ROCKCHIP_SPI_CTRLR0
);
574 if (rs
->n_bytes
== 1)
575 writel_relaxed(rs
->len
- 1, rs
->regs
+ ROCKCHIP_SPI_CTRLR1
);
576 else if (rs
->n_bytes
== 2)
577 writel_relaxed((rs
->len
/ 2) - 1, rs
->regs
+ ROCKCHIP_SPI_CTRLR1
);
579 writel_relaxed((rs
->len
* 2) - 1, rs
->regs
+ ROCKCHIP_SPI_CTRLR1
);
581 writel_relaxed(rs
->fifo_len
/ 2 - 1, rs
->regs
+ ROCKCHIP_SPI_TXFTLR
);
582 writel_relaxed(rs
->fifo_len
/ 2 - 1, rs
->regs
+ ROCKCHIP_SPI_RXFTLR
);
584 writel_relaxed(0, rs
->regs
+ ROCKCHIP_SPI_DMATDLR
);
585 writel_relaxed(0, rs
->regs
+ ROCKCHIP_SPI_DMARDLR
);
586 writel_relaxed(dmacr
, rs
->regs
+ ROCKCHIP_SPI_DMACR
);
588 spi_set_clk(rs
, div
);
590 dev_dbg(rs
->dev
, "cr0 0x%x, div %d\n", cr0
, div
);
593 static size_t rockchip_spi_max_transfer_size(struct spi_device
*spi
)
595 return ROCKCHIP_SPI_MAX_TRANLEN
;
598 static int rockchip_spi_transfer_one(
599 struct spi_master
*master
,
600 struct spi_device
*spi
,
601 struct spi_transfer
*xfer
)
604 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
606 WARN_ON(readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_SSIENR
) &&
607 (readl_relaxed(rs
->regs
+ ROCKCHIP_SPI_SR
) & SR_BUSY
));
609 if (!xfer
->tx_buf
&& !xfer
->rx_buf
) {
610 dev_err(rs
->dev
, "No buffer for transfer\n");
614 if (xfer
->len
> ROCKCHIP_SPI_MAX_TRANLEN
) {
615 dev_err(rs
->dev
, "Transfer is too long (%d)\n", xfer
->len
);
619 rs
->speed
= xfer
->speed_hz
;
620 rs
->bpw
= xfer
->bits_per_word
;
621 rs
->n_bytes
= rs
->bpw
>> 3;
623 rs
->tx
= xfer
->tx_buf
;
624 rs
->tx_end
= rs
->tx
+ xfer
->len
;
625 rs
->rx
= xfer
->rx_buf
;
626 rs
->rx_end
= rs
->rx
+ xfer
->len
;
629 rs
->tx_sg
= xfer
->tx_sg
;
630 rs
->rx_sg
= xfer
->rx_sg
;
632 if (rs
->tx
&& rs
->rx
)
633 rs
->tmode
= CR0_XFM_TR
;
635 rs
->tmode
= CR0_XFM_TO
;
637 rs
->tmode
= CR0_XFM_RO
;
639 /* we need prepare dma before spi was enabled */
640 if (master
->can_dma
&& master
->can_dma(master
, spi
, xfer
))
645 rockchip_spi_config(rs
);
648 if (rs
->tmode
== CR0_XFM_RO
) {
649 /* rx: dma must be prepared first */
650 ret
= rockchip_spi_prepare_dma(rs
);
651 spi_enable_chip(rs
, 1);
653 /* tx or tr: spi must be enabled first */
654 spi_enable_chip(rs
, 1);
655 ret
= rockchip_spi_prepare_dma(rs
);
657 /* successful DMA prepare means the transfer is in progress */
660 spi_enable_chip(rs
, 1);
661 ret
= rockchip_spi_pio_transfer(rs
);
667 static bool rockchip_spi_can_dma(struct spi_master
*master
,
668 struct spi_device
*spi
,
669 struct spi_transfer
*xfer
)
671 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
673 return (xfer
->len
> rs
->fifo_len
);
676 static int rockchip_spi_probe(struct platform_device
*pdev
)
679 struct rockchip_spi
*rs
;
680 struct spi_master
*master
;
681 struct resource
*mem
;
684 master
= spi_alloc_master(&pdev
->dev
, sizeof(struct rockchip_spi
));
688 platform_set_drvdata(pdev
, master
);
690 rs
= spi_master_get_devdata(master
);
692 /* Get basic io resource and map it */
693 mem
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
694 rs
->regs
= devm_ioremap_resource(&pdev
->dev
, mem
);
695 if (IS_ERR(rs
->regs
)) {
696 ret
= PTR_ERR(rs
->regs
);
700 rs
->apb_pclk
= devm_clk_get(&pdev
->dev
, "apb_pclk");
701 if (IS_ERR(rs
->apb_pclk
)) {
702 dev_err(&pdev
->dev
, "Failed to get apb_pclk\n");
703 ret
= PTR_ERR(rs
->apb_pclk
);
707 rs
->spiclk
= devm_clk_get(&pdev
->dev
, "spiclk");
708 if (IS_ERR(rs
->spiclk
)) {
709 dev_err(&pdev
->dev
, "Failed to get spi_pclk\n");
710 ret
= PTR_ERR(rs
->spiclk
);
714 ret
= clk_prepare_enable(rs
->apb_pclk
);
716 dev_err(&pdev
->dev
, "Failed to enable apb_pclk\n");
720 ret
= clk_prepare_enable(rs
->spiclk
);
722 dev_err(&pdev
->dev
, "Failed to enable spi_clk\n");
723 goto err_disable_apbclk
;
726 spi_enable_chip(rs
, 0);
728 rs
->type
= SSI_MOTO_SPI
;
730 rs
->dev
= &pdev
->dev
;
731 rs
->max_freq
= clk_get_rate(rs
->spiclk
);
733 if (!of_property_read_u32(pdev
->dev
.of_node
, "rx-sample-delay-ns",
735 rs
->rsd_nsecs
= rsd_nsecs
;
737 rs
->fifo_len
= get_fifo_len(rs
);
739 dev_err(&pdev
->dev
, "Failed to get fifo length\n");
741 goto err_disable_spiclk
;
744 spin_lock_init(&rs
->lock
);
746 pm_runtime_set_active(&pdev
->dev
);
747 pm_runtime_enable(&pdev
->dev
);
749 master
->auto_runtime_pm
= true;
750 master
->bus_num
= pdev
->id
;
751 master
->mode_bits
= SPI_CPOL
| SPI_CPHA
| SPI_LOOP
;
752 master
->num_chipselect
= ROCKCHIP_SPI_MAX_CS_NUM
;
753 master
->dev
.of_node
= pdev
->dev
.of_node
;
754 master
->bits_per_word_mask
= SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
756 master
->set_cs
= rockchip_spi_set_cs
;
757 master
->prepare_message
= rockchip_spi_prepare_message
;
758 master
->unprepare_message
= rockchip_spi_unprepare_message
;
759 master
->transfer_one
= rockchip_spi_transfer_one
;
760 master
->max_transfer_size
= rockchip_spi_max_transfer_size
;
761 master
->handle_err
= rockchip_spi_handle_err
;
762 master
->flags
= SPI_MASTER_GPIO_SS
;
764 rs
->dma_tx
.ch
= dma_request_chan(rs
->dev
, "tx");
765 if (IS_ERR(rs
->dma_tx
.ch
)) {
766 /* Check tx to see if we need defer probing driver */
767 if (PTR_ERR(rs
->dma_tx
.ch
) == -EPROBE_DEFER
) {
769 goto err_disable_pm_runtime
;
771 dev_warn(rs
->dev
, "Failed to request TX DMA channel\n");
772 rs
->dma_tx
.ch
= NULL
;
775 rs
->dma_rx
.ch
= dma_request_chan(rs
->dev
, "rx");
776 if (IS_ERR(rs
->dma_rx
.ch
)) {
777 if (PTR_ERR(rs
->dma_rx
.ch
) == -EPROBE_DEFER
) {
779 goto err_free_dma_tx
;
781 dev_warn(rs
->dev
, "Failed to request RX DMA channel\n");
782 rs
->dma_rx
.ch
= NULL
;
785 if (rs
->dma_tx
.ch
&& rs
->dma_rx
.ch
) {
786 dma_get_slave_caps(rs
->dma_rx
.ch
, &(rs
->dma_caps
));
787 rs
->dma_tx
.addr
= (dma_addr_t
)(mem
->start
+ ROCKCHIP_SPI_TXDR
);
788 rs
->dma_rx
.addr
= (dma_addr_t
)(mem
->start
+ ROCKCHIP_SPI_RXDR
);
789 rs
->dma_tx
.direction
= DMA_MEM_TO_DEV
;
790 rs
->dma_rx
.direction
= DMA_DEV_TO_MEM
;
792 master
->can_dma
= rockchip_spi_can_dma
;
793 master
->dma_tx
= rs
->dma_tx
.ch
;
794 master
->dma_rx
= rs
->dma_rx
.ch
;
797 ret
= devm_spi_register_master(&pdev
->dev
, master
);
799 dev_err(&pdev
->dev
, "Failed to register master\n");
800 goto err_free_dma_rx
;
807 dma_release_channel(rs
->dma_rx
.ch
);
810 dma_release_channel(rs
->dma_tx
.ch
);
811 err_disable_pm_runtime
:
812 pm_runtime_disable(&pdev
->dev
);
814 clk_disable_unprepare(rs
->spiclk
);
816 clk_disable_unprepare(rs
->apb_pclk
);
818 spi_master_put(master
);
823 static int rockchip_spi_remove(struct platform_device
*pdev
)
825 struct spi_master
*master
= spi_master_get(platform_get_drvdata(pdev
));
826 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
828 pm_runtime_get_sync(&pdev
->dev
);
830 clk_disable_unprepare(rs
->spiclk
);
831 clk_disable_unprepare(rs
->apb_pclk
);
833 pm_runtime_put_noidle(&pdev
->dev
);
834 pm_runtime_disable(&pdev
->dev
);
835 pm_runtime_set_suspended(&pdev
->dev
);
838 dma_release_channel(rs
->dma_tx
.ch
);
840 dma_release_channel(rs
->dma_rx
.ch
);
842 spi_master_put(master
);
847 #ifdef CONFIG_PM_SLEEP
848 static int rockchip_spi_suspend(struct device
*dev
)
851 struct spi_master
*master
= dev_get_drvdata(dev
);
852 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
854 ret
= spi_master_suspend(rs
->master
);
858 ret
= pm_runtime_force_suspend(dev
);
862 pinctrl_pm_select_sleep_state(dev
);
867 static int rockchip_spi_resume(struct device
*dev
)
870 struct spi_master
*master
= dev_get_drvdata(dev
);
871 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
873 pinctrl_pm_select_default_state(dev
);
875 ret
= pm_runtime_force_resume(dev
);
879 ret
= spi_master_resume(rs
->master
);
881 clk_disable_unprepare(rs
->spiclk
);
882 clk_disable_unprepare(rs
->apb_pclk
);
887 #endif /* CONFIG_PM_SLEEP */
890 static int rockchip_spi_runtime_suspend(struct device
*dev
)
892 struct spi_master
*master
= dev_get_drvdata(dev
);
893 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
895 clk_disable_unprepare(rs
->spiclk
);
896 clk_disable_unprepare(rs
->apb_pclk
);
901 static int rockchip_spi_runtime_resume(struct device
*dev
)
904 struct spi_master
*master
= dev_get_drvdata(dev
);
905 struct rockchip_spi
*rs
= spi_master_get_devdata(master
);
907 ret
= clk_prepare_enable(rs
->apb_pclk
);
911 ret
= clk_prepare_enable(rs
->spiclk
);
913 clk_disable_unprepare(rs
->apb_pclk
);
917 #endif /* CONFIG_PM */
919 static const struct dev_pm_ops rockchip_spi_pm
= {
920 SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend
, rockchip_spi_resume
)
921 SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend
,
922 rockchip_spi_runtime_resume
, NULL
)
925 static const struct of_device_id rockchip_spi_dt_match
[] = {
926 { .compatible
= "rockchip,rv1108-spi", },
927 { .compatible
= "rockchip,rk3036-spi", },
928 { .compatible
= "rockchip,rk3066-spi", },
929 { .compatible
= "rockchip,rk3188-spi", },
930 { .compatible
= "rockchip,rk3228-spi", },
931 { .compatible
= "rockchip,rk3288-spi", },
932 { .compatible
= "rockchip,rk3368-spi", },
933 { .compatible
= "rockchip,rk3399-spi", },
936 MODULE_DEVICE_TABLE(of
, rockchip_spi_dt_match
);
938 static struct platform_driver rockchip_spi_driver
= {
941 .pm
= &rockchip_spi_pm
,
942 .of_match_table
= of_match_ptr(rockchip_spi_dt_match
),
944 .probe
= rockchip_spi_probe
,
945 .remove
= rockchip_spi_remove
,
948 module_platform_driver(rockchip_spi_driver
);
950 MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
951 MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
952 MODULE_LICENSE("GPL v2");