/*
 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Addy Ke <addy.ke@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#define DRIVER_NAME "rockchip-spi"

/* SPI register offsets */
#define ROCKCHIP_SPI_CTRLR0 0x0000
#define ROCKCHIP_SPI_CTRLR1 0x0004
#define ROCKCHIP_SPI_SSIENR 0x0008
#define ROCKCHIP_SPI_SER 0x000c
#define ROCKCHIP_SPI_BAUDR 0x0010
#define ROCKCHIP_SPI_TXFTLR 0x0014
#define ROCKCHIP_SPI_RXFTLR 0x0018
#define ROCKCHIP_SPI_TXFLR 0x001c
#define ROCKCHIP_SPI_RXFLR 0x0020
#define ROCKCHIP_SPI_SR 0x0024
#define ROCKCHIP_SPI_IPR 0x0028
#define ROCKCHIP_SPI_IMR 0x002c
#define ROCKCHIP_SPI_ISR 0x0030
#define ROCKCHIP_SPI_RISR 0x0034
#define ROCKCHIP_SPI_ICR 0x0038
#define ROCKCHIP_SPI_DMACR 0x003c
#define ROCKCHIP_SPI_DMATDLR 0x0040
#define ROCKCHIP_SPI_DMARDLR 0x0044
#define ROCKCHIP_SPI_TXDR 0x0400
#define ROCKCHIP_SPI_RXDR 0x0800
/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET 0

#define CR0_CFS_OFFSET 2

#define CR0_SCPH_OFFSET 6

#define CR0_SCPOL_OFFSET 7

#define CR0_CSM_OFFSET 8
#define CR0_CSM_KEEP 0x0
/* ss_n is kept high for half a sclk_out cycle */
#define CR0_CSM_HALF 0x1
/* ss_n is kept high for one sclk_out cycle */
#define CR0_CSM_ONE 0x2

/* ss_n to sclk_out delay */
#define CR0_SSD_OFFSET 10
/*
 * The period between ss_n active and
 * sclk_out active is half a sclk_out cycle
 */
#define CR0_SSD_HALF 0x0
/*
 * The period between ss_n active and
 * sclk_out active is one sclk_out cycle
 */
#define CR0_SSD_ONE 0x1

#define CR0_EM_OFFSET 11
#define CR0_EM_LITTLE 0x0
#define CR0_EM_BIG 0x1

#define CR0_FBM_OFFSET 12
#define CR0_FBM_MSB 0x0
#define CR0_FBM_LSB 0x1

#define CR0_BHT_OFFSET 13
#define CR0_BHT_16BIT 0x0
#define CR0_BHT_8BIT 0x1

#define CR0_RSD_OFFSET 14

#define CR0_FRF_OFFSET 16
#define CR0_FRF_SPI 0x0
#define CR0_FRF_SSP 0x1
#define CR0_FRF_MICROWIRE 0x2

#define CR0_XFM_OFFSET 18
#define CR0_XFM_MASK (0x03 << CR0_XFM_OFFSET)
#define CR0_XFM_TR 0x0
#define CR0_XFM_TO 0x1
#define CR0_XFM_RO 0x2

#define CR0_OPM_OFFSET 20
#define CR0_OPM_MASTER 0x0
#define CR0_OPM_SLAVE 0x1

#define CR0_MTM_OFFSET 0x21

/* Bit fields in SER, 2 bits */
#define SER_MASK 0x3

/* Bit fields in SR, 5 bits */
#define SR_BUSY (1 << 0)
#define SR_TF_FULL (1 << 1)
#define SR_TF_EMPTY (1 << 2)
#define SR_RF_EMPTY (1 << 3)
#define SR_RF_FULL (1 << 4)

/* Bit fields in IPR, IMR, ISR, RISR, 5 bits */
#define INT_MASK 0x1f
#define INT_TF_EMPTY (1 << 0)
#define INT_TF_OVERFLOW (1 << 1)
#define INT_RF_UNDERFLOW (1 << 2)
#define INT_RF_OVERFLOW (1 << 3)
#define INT_RF_FULL (1 << 4)

/* Bit fields in ICR, 4 bits */
#define ICR_MASK 0x0f
#define ICR_ALL (1 << 0)
#define ICR_RF_UNDERFLOW (1 << 1)
#define ICR_RF_OVERFLOW (1 << 2)
#define ICR_TF_OVERFLOW (1 << 3)

/* Bit fields in DMACR */
#define RF_DMA_EN (1 << 0)
#define TF_DMA_EN (1 << 1)

/* Driver state flags, kept in rockchip_spi.state */
#define RXBUSY (1 << 0)
#define TXBUSY (1 << 1)

/* sclk_out: the spi master's internal logic in rk3x supports up to 50MHz */
#define MAX_SCLK_OUT 50000000
enum rockchip_ssi_type {
	SSI_MOTO_SPI = 0,
	SSI_TI_SSP,
	SSI_NS_MICROWIRE,
};

struct rockchip_spi_dma_data {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	dma_addr_t addr;
};
struct rockchip_spi {
	struct device *dev;
	struct spi_master *master;

	struct clk *spiclk;
	struct clk *apb_pclk;

	void __iomem *regs;
	/* depth of the FIFO buffer */
	u32 fifo_len;
	/* max bus freq supported */
	u32 max_freq;
	/* supported slave numbers */
	enum rockchip_ssi_type type;

	u16 mode;
	u8 tmode;
	u8 bpw;
	u8 n_bytes;
	unsigned int len;
	u32 speed;

	const void *tx;
	const void *tx_end;
	void *rx;
	void *rx_end;

	u32 state;
	/* protect state */
	spinlock_t lock;

	struct completion xfer_completion;

	u32 use_dma;
	struct sg_table tx_sg;
	struct sg_table rx_sg;
	struct rockchip_spi_dma_data dma_rx;
	struct rockchip_spi_dma_data dma_tx;
};
static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
{
	writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
}
static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
{
	writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
}
static inline void flush_fifo(struct rockchip_spi *rs)
{
	while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
}
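
/*
 * Poll the status register until the BUSY bit clears, giving up after
 * roughly 5ms. Only a warning is printed on timeout; the caller carries on
 * regardless.
 */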
static inline void wait_for_idle(struct rockchip_spi *rs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5);

	do {
		if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
			return;
	} while (!time_after(jiffies, timeout));

	dev_warn(rs->dev, "spi controller is in busy state!\n");
}
static u32 get_fifo_len(struct rockchip_spi *rs)
{
	u32 fifo;

	for (fifo = 2; fifo < 32; fifo++) {
		writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
		if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
			break;
	}

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);

	return (fifo == 31) ? 0 : fifo;
}
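
/*
 * tx_max()/rx_max() return how many FIFO entries can be moved right now:
 * the number of words left in the buffer, clamped by the free space in the
 * TX FIFO (or by the number of entries waiting in the RX FIFO).
 */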
static inline u32 tx_max(struct rockchip_spi *rs)
{
	u32 tx_left, tx_room;

	tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
	tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);

	return min(tx_left, tx_room);
}
static inline u32 rx_max(struct rockchip_spi *rs)
{
	u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
	u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);

	return min(rx_left, rx_room);
}
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 ser;
	struct rockchip_spi *rs = spi_master_get_devdata(spi->master);

	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;

	/*
	 * drivers/spi/spi.c:
	 * static void spi_set_cs(struct spi_device *spi, bool enable)
	 * {
	 *	if (spi->mode & SPI_CS_HIGH)
	 *		enable = !enable;
	 *
	 *	if (spi->cs_gpio >= 0)
	 *		gpio_set_value(spi->cs_gpio, !enable);
	 *	else if (spi->master->set_cs)
	 *		spi->master->set_cs(spi, !enable);
	 * }
	 *
	 * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
	 */
	if (enable)
		ser |= 1 << spi->chip_select;
	else
		ser &= ~(1 << spi->chip_select);

	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
}
static int rockchip_spi_prepare_message(struct spi_master *master,
					 struct spi_message *msg)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;

	rs->mode = spi->mode;

	return 0;
}
static int rockchip_spi_unprepare_message(struct spi_master *master,
					   struct spi_message *msg)
{
	unsigned long flags;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	spin_lock_irqsave(&rs->lock, flags);

	/*
	 * In DMA mode, if a DMA transfer timed out we have to terminate the
	 * DMA channels and flush the FIFO so the next transfer starts clean.
	 * The core calls unprepare_message() both on completion and on
	 * timeout, so this is a reasonable place for that error handling.
	 */
	if (rs->state & RXBUSY) {
		dmaengine_terminate_all(rs->dma_rx.ch);
		flush_fifo(rs);
	}

	if (rs->state & TXBUSY)
		dmaengine_terminate_all(rs->dma_tx.ch);

	spin_unlock_irqrestore(&rs->lock, flags);

	spi_enable_chip(rs, 0);

	return 0;
}
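
/*
 * Push up to tx_max() words into the TX FIFO. Called repeatedly from
 * rockchip_spi_pio_transfer() until the whole buffer has been consumed.
 */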
static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
{
	u32 max = tx_max(rs);
	u32 txw = 0;

	while (max--) {
		if (rs->n_bytes == 1)
			txw = *(u8 *)(rs->tx);
		else
			txw = *(u16 *)(rs->tx);

		writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
		rs->tx += rs->n_bytes;
	}
}
static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
	u32 max = rx_max(rs);
	u32 rxw;

	while (max--) {
		rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
		if (rs->n_bytes == 1)
			*(u8 *)(rs->rx) = (u8)rxw;
		else
			*(u16 *)(rs->rx) = (u16)rxw;
		rs->rx += rs->n_bytes;
	}
}
static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
{
	int remain = 0;

	do {
		if (rs->tx) {
			remain = rs->tx_end - rs->tx;
			rockchip_spi_pio_writer(rs);
		}

		if (rs->rx) {
			remain = rs->rx_end - rs->rx;
			rockchip_spi_pio_reader(rs);
		}

		cpu_relax();
	} while (remain);

	/* If tx data was queued, wait until the FIFO has completely drained. */
	if (rs->tx)
		wait_for_idle(rs);

	spi_enable_chip(rs, 0);

	return 0;
}
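
/*
 * DMA completion callbacks. For a full-duplex transfer both the RX and TX
 * callbacks must have fired before the controller is disabled and the
 * transfer is finalized; the RXBUSY/TXBUSY bits in rs->state (protected by
 * rs->lock) keep track of that.
 */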
static void rockchip_spi_dma_rxcb(void *data)
{
	unsigned long flags;
	struct rockchip_spi *rs = data;

	spin_lock_irqsave(&rs->lock, flags);

	rs->state &= ~RXBUSY;
	if (!(rs->state & TXBUSY)) {
		spi_enable_chip(rs, 0);
		spi_finalize_current_transfer(rs->master);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}
static void rockchip_spi_dma_txcb(void *data)
{
	unsigned long flags;
	struct rockchip_spi *rs = data;

	/* Wait until the FIFO has completely drained. */
	wait_for_idle(rs);

	spin_lock_irqsave(&rs->lock, flags);

	rs->state &= ~TXBUSY;
	if (!(rs->state & RXBUSY)) {
		spi_enable_chip(rs, 0);
		spi_finalize_current_transfer(rs->master);
	}

	spin_unlock_irqrestore(&rs->lock, flags);
}
static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
{
	unsigned long flags;
	struct dma_slave_config rxconf, txconf;
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;

	spin_lock_irqsave(&rs->lock, flags);
	rs->state &= ~RXBUSY;
	rs->state &= ~TXBUSY;
	spin_unlock_irqrestore(&rs->lock, flags);

	if (rs->rx) {
		rxconf.direction = rs->dma_rx.direction;
		rxconf.src_addr = rs->dma_rx.addr;
		rxconf.src_addr_width = rs->n_bytes;
		rxconf.src_maxburst = rs->n_bytes;
		dmaengine_slave_config(rs->dma_rx.ch, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
				rs->dma_rx.ch,
				rs->rx_sg.sgl, rs->rx_sg.nents,
				rs->dma_rx.direction, DMA_PREP_INTERRUPT);

		rxdesc->callback = rockchip_spi_dma_rxcb;
		rxdesc->callback_param = rs;
	}

	if (rs->tx) {
		txconf.direction = rs->dma_tx.direction;
		txconf.dst_addr = rs->dma_tx.addr;
		txconf.dst_addr_width = rs->n_bytes;
		txconf.dst_maxburst = rs->n_bytes;
		dmaengine_slave_config(rs->dma_tx.ch, &txconf);

		txdesc = dmaengine_prep_slave_sg(
				rs->dma_tx.ch,
				rs->tx_sg.sgl, rs->tx_sg.nents,
				rs->dma_tx.direction, DMA_PREP_INTERRUPT);

		txdesc->callback = rockchip_spi_dma_txcb;
		txdesc->callback_param = rs;
	}

	/*
	 * rx must be started before tx: in SPI every word shifted out clocks
	 * a word in, so the receive side has to be ready first.
	 */
	if (rxdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= RXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(rs->dma_rx.ch);
	}

	if (txdesc) {
		spin_lock_irqsave(&rs->lock, flags);
		rs->state |= TXBUSY;
		spin_unlock_irqrestore(&rs->lock, flags);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(rs->dma_tx.ch);
	}
}
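
/*
 * Program CTRLR0, the transfer length, the FIFO thresholds and the baud-rate
 * divider for the current transfer. sclk_out = spiclk / BAUDR, and BAUDR
 * must be an even value of at least 2, so spiclk is raised first if it is
 * less than twice the requested speed. Example: with spiclk at 48MHz and a
 * requested speed of 12MHz, div = 4 and sclk_out is exactly 12MHz.
 */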
static void rockchip_spi_config(struct rockchip_spi *rs)
{
	u32 div = 0;
	u32 dmacr = 0;

	u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
		| (CR0_SSD_ONE << CR0_SSD_OFFSET);

	cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
	cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
	cr0 |= (rs->tmode << CR0_XFM_OFFSET);
	cr0 |= (rs->type << CR0_FRF_OFFSET);

	if (rs->use_dma) {
		if (rs->tx)
			dmacr |= TF_DMA_EN;
		if (rs->rx)
			dmacr |= RF_DMA_EN;
	}

	if (WARN_ON(rs->speed > MAX_SCLK_OUT))
		rs->speed = MAX_SCLK_OUT;

	/* the minimum divisor is 2 */
	if (rs->max_freq < 2 * rs->speed) {
		clk_set_rate(rs->spiclk, 2 * rs->speed);
		rs->max_freq = clk_get_rate(rs->spiclk);
	}

	/* the divider doesn't support odd numbers */
	div = max_t(u32, rs->max_freq / rs->speed, 1);
	div = (div + 1) & 0xfffe;

	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
	writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);

	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);

	spi_set_clk(rs, div);

	dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
}
static int rockchip_spi_transfer_one(
		struct spi_master *master,
		struct spi_device *spi,
		struct spi_transfer *xfer)
{
	int ret = 0;
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));

	if (!xfer->tx_buf && !xfer->rx_buf) {
		dev_err(rs->dev, "No buffer for transfer\n");
		return -EINVAL;
	}

	rs->speed = xfer->speed_hz;
	rs->bpw = xfer->bits_per_word;
	rs->n_bytes = rs->bpw >> 3;

	rs->tx = xfer->tx_buf;
	rs->tx_end = rs->tx + xfer->len;
	rs->rx = xfer->rx_buf;
	rs->rx_end = rs->rx + xfer->len;
	rs->len = xfer->len;

	rs->tx_sg = xfer->tx_sg;
	rs->rx_sg = xfer->rx_sg;

	if (rs->tx && rs->rx)
		rs->tmode = CR0_XFM_TR;
	else if (rs->tx)
		rs->tmode = CR0_XFM_TO;
	else if (rs->rx)
		rs->tmode = CR0_XFM_RO;

	/* we need to prepare dma before the spi is enabled */
	if (master->can_dma && master->can_dma(master, spi, xfer))
		rs->use_dma = 1;
	else
		rs->use_dma = 0;

	rockchip_spi_config(rs);

	if (rs->use_dma) {
		if (rs->tmode == CR0_XFM_RO) {
			/* rx: dma must be prepared first */
			rockchip_spi_prepare_dma(rs);
			spi_enable_chip(rs, 1);
		} else {
			/* tx or tr: spi must be enabled first */
			spi_enable_chip(rs, 1);
			rockchip_spi_prepare_dma(rs);
		}
	} else {
		spi_enable_chip(rs, 1);
		ret = rockchip_spi_pio_transfer(rs);
	}

	return ret;
}
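
/* DMA is only worthwhile when a transfer cannot fit entirely in the FIFO. */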
static bool rockchip_spi_can_dma(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	return (xfer->len > rs->fifo_len);
}
static int rockchip_spi_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct rockchip_spi *rs;
	struct spi_master *master;
	struct resource *mem;

	master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	rs = spi_master_get_devdata(master);
	memset(rs, 0, sizeof(struct rockchip_spi));

	/* Get basic io resource and map it */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(rs->regs)) {
		ret = PTR_ERR(rs->regs);
		goto err_ioremap_resource;
	}

	rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(rs->apb_pclk)) {
		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
		ret = PTR_ERR(rs->apb_pclk);
		goto err_ioremap_resource;
	}

	rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
	if (IS_ERR(rs->spiclk)) {
		dev_err(&pdev->dev, "Failed to get spiclk\n");
		ret = PTR_ERR(rs->spiclk);
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
		goto err_ioremap_resource;
	}

	ret = clk_prepare_enable(rs->spiclk);
	if (ret) {
		dev_err(&pdev->dev, "Failed to enable spiclk\n");
		goto err_spiclk_enable;
	}

	spi_enable_chip(rs, 0);

	rs->type = SSI_MOTO_SPI;
	rs->master = master;
	rs->dev = &pdev->dev;
	rs->max_freq = clk_get_rate(rs->spiclk);

	rs->fifo_len = get_fifo_len(rs);
	if (!rs->fifo_len) {
		dev_err(&pdev->dev, "Failed to get fifo length\n");
		ret = -EINVAL;
		goto err_get_fifo_len;
	}

	spin_lock_init(&rs->lock);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	master->auto_runtime_pm = true;
	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->num_chipselect = 2;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);

	master->set_cs = rockchip_spi_set_cs;
	master->prepare_message = rockchip_spi_prepare_message;
	master->unprepare_message = rockchip_spi_unprepare_message;
	master->transfer_one = rockchip_spi_transfer_one;

	rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
	if (!rs->dma_tx.ch)
		dev_warn(rs->dev, "Failed to request TX DMA channel\n");

	rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
	if (!rs->dma_rx.ch) {
		if (rs->dma_tx.ch) {
			dma_release_channel(rs->dma_tx.ch);
			rs->dma_tx.ch = NULL;
		}
		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
	}

	if (rs->dma_tx.ch && rs->dma_rx.ch) {
		rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
		rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
		rs->dma_tx.direction = DMA_MEM_TO_DEV;
		rs->dma_rx.direction = DMA_DEV_TO_MEM;

		master->can_dma = rockchip_spi_can_dma;
		master->dma_tx = rs->dma_tx.ch;
		master->dma_rx = rs->dma_rx.ch;
	}

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register master\n");
		goto err_register_master;
	}

	return 0;

err_register_master:
	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);
err_get_fifo_len:
	clk_disable_unprepare(rs->spiclk);
err_spiclk_enable:
	clk_disable_unprepare(rs->apb_pclk);
err_ioremap_resource:
	spi_master_put(master);

	return ret;
}
static int rockchip_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	if (rs->dma_tx.ch)
		dma_release_channel(rs->dma_tx.ch);
	if (rs->dma_rx.ch)
		dma_release_channel(rs->dma_rx.ch);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int rockchip_spi_suspend(struct device *dev)
{
	int ret = 0;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	ret = spi_master_suspend(rs->master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(rs->spiclk);
		clk_disable_unprepare(rs->apb_pclk);
	}

	return ret;
}

static int rockchip_spi_resume(struct device *dev)
{
	int ret = 0;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(rs->apb_pclk);
		if (ret < 0)
			return ret;

		ret = clk_prepare_enable(rs->spiclk);
		if (ret < 0) {
			clk_disable_unprepare(rs->apb_pclk);
			return ret;
		}
	}

	ret = spi_master_resume(rs->master);
	if (ret < 0) {
		clk_disable_unprepare(rs->spiclk);
		clk_disable_unprepare(rs->apb_pclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int rockchip_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	clk_disable_unprepare(rs->spiclk);
	clk_disable_unprepare(rs->apb_pclk);

	return 0;
}

static int rockchip_spi_runtime_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct rockchip_spi *rs = spi_master_get_devdata(master);

	ret = clk_prepare_enable(rs->apb_pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(rs->spiclk);
	if (ret)
		clk_disable_unprepare(rs->apb_pclk);

	return ret;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops rockchip_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
			   rockchip_spi_runtime_resume, NULL)
};
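
/*
 * Example devicetree node (illustrative only; the address, clock and DMA
 * specifiers below are made up and board/SoC specific -- consult the
 * rockchip-spi DT binding document). The clock-names ("spiclk", "apb_pclk")
 * and dma-names ("tx", "rx") are the names this driver looks up.
 *
 *	spi0: spi@ff110000 {
 *		compatible = "rockchip,rk3288-spi";
 *		reg = <0xff110000 0x1000>;
 *		clocks = <&cru 1>, <&cru 2>;
 *		clock-names = "spiclk", "apb_pclk";
 *		dmas = <&dmac 11>, <&dmac 12>;
 *		dma-names = "tx", "rx";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *	};
 */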
static const struct of_device_id rockchip_spi_dt_match[] = {
	{ .compatible = "rockchip,rk3066-spi", },
	{ .compatible = "rockchip,rk3188-spi", },
	{ .compatible = "rockchip,rk3288-spi", },
	{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
static struct platform_driver rockchip_spi_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.pm = &rockchip_spi_pm,
		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
	},
	.probe = rockchip_spi_probe,
	.remove = rockchip_spi_remove,
};

module_platform_driver(rockchip_spi_driver);
MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
MODULE_LICENSE("GPL v2");