// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2009 Samsung Electronics Co., Ltd.
//      Jaswinder Singh <jassi.brar@samsung.com>

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

#include <linux/platform_data/spi-s3c64xx.h>
#define MAX_SPI_PORTS		6
#define S3C64XX_SPI_QUIRK_POLL		(1 << 0)
#define S3C64XX_SPI_QUIRK_CS_AUTO	(1 << 1)
#define AUTOSUSPEND_TIMEOUT	2000

/* Registers and bit-fields */

#define S3C64XX_SPI_CH_CFG		0x00
#define S3C64XX_SPI_CLK_CFG		0x04
#define S3C64XX_SPI_MODE_CFG		0x08
#define S3C64XX_SPI_CS_REG		0x0C
#define S3C64XX_SPI_INT_EN		0x10
#define S3C64XX_SPI_STATUS		0x14
#define S3C64XX_SPI_TX_DATA		0x18
#define S3C64XX_SPI_RX_DATA		0x1C
#define S3C64XX_SPI_PACKET_CNT		0x20
#define S3C64XX_SPI_PENDING_CLR	0x24
#define S3C64XX_SPI_SWAP_CFG		0x28
#define S3C64XX_SPI_FB_CLK		0x2C

#define S3C64XX_SPI_CH_HS_EN		(1<<6)	/* High Speed Enable */
#define S3C64XX_SPI_CH_SW_RST		(1<<5)
#define S3C64XX_SPI_CH_SLAVE		(1<<4)
#define S3C64XX_SPI_CPOL_L		(1<<3)
#define S3C64XX_SPI_CPHA_B		(1<<2)
#define S3C64XX_SPI_CH_RXCH_ON		(1<<1)
#define S3C64XX_SPI_CH_TXCH_ON		(1<<0)

#define S3C64XX_SPI_CLKSEL_SRCMSK	(3<<9)
#define S3C64XX_SPI_CLKSEL_SRCSHFT	9
#define S3C64XX_SPI_ENCLK_ENABLE	(1<<8)
#define S3C64XX_SPI_PSR_MASK		0xff

#define S3C64XX_SPI_MODE_CH_TSZ_BYTE		(0<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD	(1<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_WORD		(2<<29)
#define S3C64XX_SPI_MODE_CH_TSZ_MASK		(3<<29)
#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE		(0<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD	(1<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_WORD		(2<<17)
#define S3C64XX_SPI_MODE_BUS_TSZ_MASK		(3<<17)
#define S3C64XX_SPI_MODE_RXDMA_ON		(1<<2)
#define S3C64XX_SPI_MODE_TXDMA_ON		(1<<1)
#define S3C64XX_SPI_MODE_4BURST			(1<<0)

#define S3C64XX_SPI_CS_NSC_CNT_2		(2<<4)
#define S3C64XX_SPI_CS_AUTO			(1<<1)
#define S3C64XX_SPI_CS_SIG_INACT		(1<<0)

#define S3C64XX_SPI_INT_TRAILING_EN		(1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN		(1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN		(1<<4)
#define S3C64XX_SPI_INT_TX_OVERRUN_EN		(1<<3)
#define S3C64XX_SPI_INT_TX_UNDERRUN_EN		(1<<2)
#define S3C64XX_SPI_INT_RX_FIFORDY_EN		(1<<1)
#define S3C64XX_SPI_INT_TX_FIFORDY_EN		(1<<0)

#define S3C64XX_SPI_ST_RX_OVERRUN_ERR		(1<<5)
#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR		(1<<4)
#define S3C64XX_SPI_ST_TX_OVERRUN_ERR		(1<<3)
#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR		(1<<2)
#define S3C64XX_SPI_ST_RX_FIFORDY		(1<<1)
#define S3C64XX_SPI_ST_TX_FIFORDY		(1<<0)

#define S3C64XX_SPI_PACKET_CNT_EN		(1<<16)

#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR		(1<<4)
#define S3C64XX_SPI_PND_TX_OVERRUN_CLR		(1<<3)
#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR		(1<<2)
#define S3C64XX_SPI_PND_RX_OVERRUN_CLR		(1<<1)
#define S3C64XX_SPI_PND_TRAILING_CLR		(1<<0)

#define S3C64XX_SPI_SWAP_RX_HALF_WORD		(1<<7)
#define S3C64XX_SPI_SWAP_RX_BYTE		(1<<6)
#define S3C64XX_SPI_SWAP_RX_BIT			(1<<5)
#define S3C64XX_SPI_SWAP_RX_EN			(1<<4)
#define S3C64XX_SPI_SWAP_TX_HALF_WORD		(1<<3)
#define S3C64XX_SPI_SWAP_TX_BYTE		(1<<2)
#define S3C64XX_SPI_SWAP_TX_BIT			(1<<1)
#define S3C64XX_SPI_SWAP_TX_EN			(1<<0)

#define S3C64XX_SPI_FBCLK_MSK			(3<<0)
#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
				(1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
					FIFO_LVL_MASK(i))

#define S3C64XX_SPI_MAX_TRAILCNT	0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF	19

#define S3C64XX_SPI_TRAILCNT		S3C64XX_SPI_MAX_TRAILCNT

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

#define is_polling(x)	(x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)

#define RXBUSY    (1<<2)
#define TXBUSY    (1<<3)
struct s3c64xx_spi_dma_data {
	struct dma_chan *ch;
	dma_cookie_t cookie;
	enum dma_transfer_direction direction;
};
/**
 * struct s3c64xx_spi_port_config - SPI Controller hardware info
 * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
 * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
 * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
 * @quirks: Bitmask of known quirks
 * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
 * @clk_from_cmu: True, if the controller does not include a clock mux and
 *	prescaler unit.
 * @clk_ioclk: True if clock is present on this device
 *
 * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
 * differ in some aspects such as the size of the FIFO and SPI bus clock
 * setup. Such differences are specified to the driver using this structure
 * which is provided as driver data to the driver.
 */
struct s3c64xx_spi_port_config {
	int	fifo_lvl_mask[MAX_SPI_PORTS];
	int	rx_lvl_offset;
	int	tx_st_done;
	int	quirks;
	bool	high_speed;
	bool	clk_from_cmu;
	bool	clk_ioclk;
};
/**
 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
 * @clk: Pointer to the spi clock.
 * @src_clk: Pointer to the clock used to generate SPI signals.
 * @ioclk: Pointer to the i/o clock between master and slave.
 * @pdev: Pointer to device's platform device data.
 * @master: Pointer to the SPI Protocol master.
 * @cntrlr_info: Platform specific data for the controller this driver manages.
 * @lock: Controller specific lock.
 * @state: Set of FLAGS to indicate status.
 * @sfr_start: BUS address of SPI controller regs.
 * @regs: Pointer to ioremap'ed controller registers.
 * @xfer_completion: To indicate completion of xfer task.
 * @cur_mode: Stores the active configuration of the controller.
 * @cur_bpw: Stores the active bits per word settings.
 * @cur_speed: Current clock speed.
 * @rx_dma: Local receive DMA data (e.g. chan and direction).
 * @tx_dma: Local transmit DMA data (e.g. chan and direction).
 * @port_conf: Local SPI port configuration data.
 * @port_id: Port identification number.
 */
struct s3c64xx_spi_driver_data {
	void __iomem                    *regs;
	struct clk                      *clk;
	struct clk                      *src_clk;
	struct clk                      *ioclk;
	struct platform_device          *pdev;
	struct spi_master               *master;
	struct s3c64xx_spi_info         *cntrlr_info;
	spinlock_t                      lock;
	unsigned long                   sfr_start;
	struct completion               xfer_completion;
	unsigned                        state;
	unsigned                        cur_mode, cur_bpw;
	unsigned                        cur_speed;
	struct s3c64xx_spi_dma_data	rx_dma;
	struct s3c64xx_spi_dma_data	tx_dma;
	struct s3c64xx_spi_port_config	*port_conf;
	unsigned int			port_id;
};
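
/*
 * Drain both FIFOs and soft-reset the channel so that no stale data is left
 * behind for the next transfer.
 */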
static void s3c64xx_flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	unsigned long loops;
	u32 val;

	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val |= S3C64XX_SPI_CH_SW_RST;
	val &= ~S3C64XX_SPI_CH_HS_EN;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Flush TxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
	} while (TX_FIFO_LVL(val, sdd) && loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");

	/* Flush RxFIFO */
	loops = msecs_to_loops(1);
	do {
		val = readl(regs + S3C64XX_SPI_STATUS);
		if (RX_FIFO_LVL(val, sdd))
			readl(regs + S3C64XX_SPI_RX_DATA);
		else
			break;
	} while (loops--);

	if (loops == 0)
		dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");

	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~S3C64XX_SPI_CH_SW_RST;
	writel(val, regs + S3C64XX_SPI_CH_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);
}
static void s3c64xx_spi_dmacb(void *data)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_dma_data *dma = data;
	unsigned long flags;

	if (dma->direction == DMA_DEV_TO_MEM)
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, rx_dma);
	else
		sdd = container_of(data,
			struct s3c64xx_spi_driver_data, tx_dma);

	spin_lock_irqsave(&sdd->lock, flags);

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd->state &= ~RXBUSY;
		if (!(sdd->state & TXBUSY))
			complete(&sdd->xfer_completion);
	} else {
		sdd->state &= ~TXBUSY;
		if (!(sdd->state & RXBUSY))
			complete(&sdd->xfer_completion);
	}

	spin_unlock_irqrestore(&sdd->lock, flags);
}
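
/*
 * Set up the slave DMA configuration for this direction, prepare a descriptor
 * for the transfer's scatterlist and submit it. Returns 0 or a negative errno.
 */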
static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
			struct sg_table *sgt)
{
	struct s3c64xx_spi_driver_data *sdd;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc;
	int ret;

	memset(&config, 0, sizeof(config));

	if (dma->direction == DMA_DEV_TO_MEM) {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, rx_dma);
		config.direction = dma->direction;
		config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
		config.src_addr_width = sdd->cur_bpw / 8;
		config.src_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	} else {
		sdd = container_of((void *)dma,
			struct s3c64xx_spi_driver_data, tx_dma);
		config.direction = dma->direction;
		config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
		config.dst_addr_width = sdd->cur_bpw / 8;
		config.dst_maxburst = 1;
		dmaengine_slave_config(dma->ch, &config);
	}

	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
				       dma->direction, DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
			dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
		return -ENOMEM;
	}

	desc->callback = s3c64xx_spi_dmacb;
	desc->callback_param = dma;

	dma->cookie = dmaengine_submit(desc);
	ret = dma_submit_error(dma->cookie);
	if (ret) {
		dev_err(&sdd->pdev->dev, "DMA submission failed");
		return -EIO;
	}

	dma_async_issue_pending(dma->ch);
	return 0;
}
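
/*
 * Drive the chip-select line for this device, honouring the no_cs flag and
 * the CS_AUTO quirk of newer controllers.
 */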
static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct s3c64xx_spi_driver_data *sdd =
					spi_master_get_devdata(spi->master);

	if (sdd->cntrlr_info->no_cs)
		return;

	if (enable) {
		if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO)) {
			writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
		} else {
			u32 ssel = readl(sdd->regs + S3C64XX_SPI_CS_REG);

			ssel |= (S3C64XX_SPI_CS_AUTO |
						S3C64XX_SPI_CS_NSC_CNT_2);
			writel(ssel, sdd->regs + S3C64XX_SPI_CS_REG);
		}
	} else {
		if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
			writel(S3C64XX_SPI_CS_SIG_INACT,
			       sdd->regs + S3C64XX_SPI_CS_REG);
	}
}
static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);

	if (is_polling(sdd))
		return 0;

	spi->dma_rx = sdd->rx_dma.ch;
	spi->dma_tx = sdd->tx_dma.ch;

	return 0;
}
static bool s3c64xx_spi_can_dma(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}
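
/*
 * Program MODE_CFG/CH_CFG and the packet count for one transfer and, depending
 * on dma_mode, either submit the DMA descriptors or write the TX data into the
 * FIFO directly.
 */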
static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
				   struct spi_transfer *xfer, int dma_mode)
{
	void __iomem *regs = sdd->regs;
	u32 modecfg, chcfg;
	int ret = 0;

	modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
	modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);

	chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
	chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;

	if (dma_mode) {
		chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
	} else {
		/* Always shift in data in FIFO, even if xfer is Tx only,
		 * this helps setting PCKT_CNT value for generating clocks
		 * as exactly needed.
		 */
		chcfg |= S3C64XX_SPI_CH_RXCH_ON;
		writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
	}

	if (xfer->tx_buf != NULL) {
		sdd->state |= TXBUSY;
		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
			ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
		} else {
			switch (sdd->cur_bpw) {
			case 32:
				iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 4);
				break;
			case 16:
				iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len / 2);
				break;
			default:
				iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
					xfer->tx_buf, xfer->len);
				break;
			}
		}
	}

	if (xfer->rx_buf != NULL) {
		sdd->state |= RXBUSY;

		if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
					&& !(sdd->cur_mode & SPI_CPHA))
			chcfg |= S3C64XX_SPI_CH_HS_EN;

		if (dma_mode) {
			modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
			chcfg |= S3C64XX_SPI_CH_RXCH_ON;
			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
					| S3C64XX_SPI_PACKET_CNT_EN,
					regs + S3C64XX_SPI_PACKET_CNT);
			ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
		}
	}

	if (ret < 0)
		return ret;

	writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
	writel(chcfg, regs + S3C64XX_SPI_CH_CFG);

	return 0;
}
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
					int timeout_ms)
{
	void __iomem *regs = sdd->regs;
	unsigned long val = 1;
	u32 status;

	/* max fifo depth available */
	u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;

	if (timeout_ms)
		val = msecs_to_loops(timeout_ms);

	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);

	/* return the actual received data length */
	return RX_FIFO_LVL(status, sdd);
}
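
/*
 * Wait for the DMA callbacks to signal completion; for TX-only transfers also
 * poll until the TX FIFO has drained and TX_DONE is set.
 */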
static int s3c64xx_wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 30;               /* some tolerance */
	ms = max(ms, 100);      /* minimum timeout */

	val = msecs_to_jiffies(ms) + 10;
	val = wait_for_completion_timeout(&sdd->xfer_completion, val);

	/*
	 * If the previous xfer was completed within timeout, then
	 * proceed further else return -EIO.
	 * DmaTx returns after simply writing data in the FIFO,
	 * w/o waiting for real transmission on the bus to finish.
	 * DmaRx returns only after Dma read data from FIFO which
	 * needs bus transmission to finish, so we don't worry if
	 * Xfer involved Rx(with or without Tx).
	 */
	if (val && !xfer->rx_buf) {
		val = msecs_to_loops(10);
		status = readl(regs + S3C64XX_SPI_STATUS);
		while ((TX_FIFO_LVL(status, sdd)
			|| !S3C64XX_SPI_ST_TX_DONE(status, sdd))
		       && --val) {
			cpu_relax();
			status = readl(regs + S3C64XX_SPI_STATUS);
		}
	}

	/* If timed out while checking rx/tx status return error */
	if (!val)
		return -EIO;

	return 0;
}
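
/*
 * Polled-mode completion: busy-wait until data shows up in the RX FIFO and
 * drain it into xfer->rx_buf, one FIFO-sized chunk at a time.
 */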
static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
				struct spi_transfer *xfer)
{
	void __iomem *regs = sdd->regs;
	unsigned long val;
	u32 status;
	int loops;
	u32 cpy_len;
	u8 *buf;
	int ms;

	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
	ms = xfer->len * 8 * 1000 / sdd->cur_speed;
	ms += 10; /* some tolerance */

	val = msecs_to_loops(ms);
	do {
		status = readl(regs + S3C64XX_SPI_STATUS);
	} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);

	if (!val)
		return -EIO;

	/* If it was only Tx */
	if (!xfer->rx_buf) {
		sdd->state &= ~TXBUSY;
		return 0;
	}

	/*
	 * If the receive length is bigger than the controller fifo
	 * size, calculate the loops and read the fifo as many times.
	 * loops = length / max fifo size (calculated by using the
	 * fifo mask).
	 * For any size less than the fifo size the below code is
	 * executed atleast once.
	 */
	loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
	buf = xfer->rx_buf;
	do {
		/* wait for data to be received in the fifo */
		cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
						       (loops ? ms : 0));

		switch (sdd->cur_bpw) {
		case 32:
			ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 4);
			break;
		case 16:
			ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
				     buf, cpy_len / 2);
			break;
		default:
			ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
				    buf, cpy_len);
			break;
		}

		buf = buf + cpy_len;
	} while (loops--);
	sdd->state &= ~RXBUSY;

	return 0;
}
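
/*
 * Apply cur_mode/cur_bpw/cur_speed to the hardware: CPOL/CPHA, channel and bus
 * transfer widths, and either the CMU clock rate or the internal prescaler.
 */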
static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
	void __iomem *regs = sdd->regs;
	int ret;
	u32 val;

	/* Disable Clock */
	if (!sdd->port_conf->clk_from_cmu) {
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	/* Set Polarity and Phase */
	val = readl(regs + S3C64XX_SPI_CH_CFG);
	val &= ~(S3C64XX_SPI_CH_SLAVE |
			S3C64XX_SPI_CPOL_L |
			S3C64XX_SPI_CPHA_B);

	if (sdd->cur_mode & SPI_CPOL)
		val |= S3C64XX_SPI_CPOL_L;

	if (sdd->cur_mode & SPI_CPHA)
		val |= S3C64XX_SPI_CPHA_B;

	writel(val, regs + S3C64XX_SPI_CH_CFG);

	/* Set Channel & DMA Mode */
	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
			| S3C64XX_SPI_MODE_CH_TSZ_MASK);

	switch (sdd->cur_bpw) {
	case 32:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
		break;
	case 16:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
		val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
		break;
	default:
		val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
		val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
		break;
	}

	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	if (sdd->port_conf->clk_from_cmu) {
		/* The src_clk clock is divided internally by 2 */
		ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
		if (ret)
			return ret;
		sdd->cur_speed = clk_get_rate(sdd->src_clk) / 2;
	} else {
		/* Configure Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val &= ~S3C64XX_SPI_PSR_MASK;
		val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
				& S3C64XX_SPI_PSR_MASK);
		writel(val, regs + S3C64XX_SPI_CLK_CFG);

		/* Enable Clock */
		val = readl(regs + S3C64XX_SPI_CLK_CFG);
		val |= S3C64XX_SPI_ENCLK_ENABLE;
		writel(val, regs + S3C64XX_SPI_CLK_CFG);
	}

	return 0;
}
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)

static int s3c64xx_spi_prepare_message(struct spi_master *master,
				       struct spi_message *msg)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct spi_device *spi = msg->spi;
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;

	/* Configure feedback delay */
	writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);

	return 0;
}
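
/*
 * Execute one transfer. DMA is used when both channels are available and the
 * data does not fit in the FIFO; in polling mode larger transfers are split
 * into FIFO-sized chunks and the original buffers and length are restored
 * afterwards.
 */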
static int s3c64xx_spi_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	const unsigned int fifo_len = (FIFO_LVL_MASK(sdd) >> 1) + 1;
	const void *tx_buf = NULL;
	void *rx_buf = NULL;
	int target_len = 0, origin_len = 0;
	int use_dma = 0;
	int status;
	u32 speed;
	u8 bpw;
	unsigned long flags;

	reinit_completion(&sdd->xfer_completion);

	/* Only BPW and Speed may change across transfers */
	bpw = xfer->bits_per_word;
	speed = xfer->speed_hz;

	if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
		sdd->cur_bpw = bpw;
		sdd->cur_speed = speed;
		sdd->cur_mode = spi->mode;
		status = s3c64xx_spi_config(sdd);
		if (status)
			return status;
	}

	if (!is_polling(sdd) && (xfer->len > fifo_len) &&
	    sdd->rx_dma.ch && sdd->tx_dma.ch) {
		use_dma = 1;

	} else if (is_polling(sdd) && xfer->len > fifo_len) {
		tx_buf = xfer->tx_buf;
		rx_buf = xfer->rx_buf;
		origin_len = xfer->len;

		target_len = xfer->len;
		if (xfer->len > fifo_len)
			xfer->len = fifo_len;
	}

	do {
		spin_lock_irqsave(&sdd->lock, flags);

		/* Pending only which is to be done */
		sdd->state &= ~RXBUSY;
		sdd->state &= ~TXBUSY;

		/* Start the signals */
		s3c64xx_spi_set_cs(spi, true);

		status = s3c64xx_enable_datapath(sdd, xfer, use_dma);

		spin_unlock_irqrestore(&sdd->lock, flags);

		if (status) {
			dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
			break;
		}

		if (use_dma)
			status = s3c64xx_wait_for_dma(sdd, xfer);
		else
			status = s3c64xx_wait_for_pio(sdd, xfer);

		if (status) {
			dev_err(&spi->dev,
				"I/O Error: rx-%d tx-%d rx-%c tx-%c len-%d dma-%d res-(%d)\n",
				xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
				(sdd->state & RXBUSY) ? 'f' : 'p',
				(sdd->state & TXBUSY) ? 'f' : 'p',
				xfer->len, use_dma ? 1 : 0, status);

			if (use_dma) {
				struct dma_tx_state s;

				if (xfer->tx_buf && (sdd->state & TXBUSY)) {
					dmaengine_pause(sdd->tx_dma.ch);
					dmaengine_tx_status(sdd->tx_dma.ch, sdd->tx_dma.cookie, &s);
					dmaengine_terminate_all(sdd->tx_dma.ch);
					dev_err(&spi->dev, "TX residue: %d\n", s.residue);
				}
				if (xfer->rx_buf && (sdd->state & RXBUSY)) {
					dmaengine_pause(sdd->rx_dma.ch);
					dmaengine_tx_status(sdd->rx_dma.ch, sdd->rx_dma.cookie, &s);
					dmaengine_terminate_all(sdd->rx_dma.ch);
					dev_err(&spi->dev, "RX residue: %d\n", s.residue);
				}
			}
		} else {
			s3c64xx_flush_fifo(sdd);
		}
		if (target_len > 0) {
			target_len -= xfer->len;

			/* Setup target ptr for next transfer */
			if (xfer->tx_buf)
				xfer->tx_buf += xfer->len;

			if (xfer->rx_buf)
				xfer->rx_buf += xfer->len;

			if (target_len > fifo_len)
				xfer->len = fifo_len;
			else
				xfer->len = target_len;
		}
	} while (target_len > 0);

	if (origin_len) {
		/* Restore original xfer buffers and length */
		xfer->tx_buf = tx_buf;
		xfer->rx_buf = rx_buf;
		xfer->len = origin_len;
	}

	return status;
}
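
/*
 * Per-slave chip-select data comes from a "controller-data" child node of the
 * slave. A minimal sketch (only the property read below is real; node names
 * and values are illustrative):
 *
 *	slave@0 {
 *		...
 *		controller-data {
 *			samsung,spi-feedback-delay = <0>;
 *		};
 *	};
 */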
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
				struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs;
	struct device_node *slave_np, *data_np = NULL;
	u32 fb_delay = 0;

	slave_np = spi->dev.of_node;
	if (!slave_np) {
		dev_err(&spi->dev, "device node not found\n");
		return ERR_PTR(-EINVAL);
	}

	data_np = of_get_child_by_name(slave_np, "controller-data");
	if (!data_np) {
		dev_err(&spi->dev, "child node 'controller-data' not found\n");
		return ERR_PTR(-EINVAL);
	}

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs) {
		of_node_put(data_np);
		return ERR_PTR(-ENOMEM);
	}

	of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
	cs->fb_delay = fb_delay;
	of_node_put(data_np);
	return cs;
}
/*
 * Here we only check the validity of requested configuration
 * and save the configuration in a local data-structure.
 * The controller is actually configured only just before we
 * get a message to transfer.
 */
static int s3c64xx_spi_setup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi->controller_data;
	struct s3c64xx_spi_driver_data *sdd;
	int err;

	sdd = spi_master_get_devdata(spi->master);
	if (spi->dev.of_node) {
		cs = s3c64xx_get_slave_ctrldata(spi);
		spi->controller_data = cs;
	} else if (cs) {
		/* On non-DT platforms the SPI core will set spi->cs_gpio
		 * to -ENOENT. The GPIO pin used to drive the chip select
		 * is defined by using platform data so spi->cs_gpio value
		 * has to be override to have the proper GPIO pin number.
		 */
		spi->cs_gpio = cs->line;
	}

	if (IS_ERR_OR_NULL(cs)) {
		dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
		return -ENODEV;
	}

	if (!spi_get_ctldata(spi)) {
		if (gpio_is_valid(spi->cs_gpio)) {
			err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
					       dev_name(&spi->dev));
			if (err) {
				dev_err(&spi->dev,
					"Failed to get /CS gpio [%d]: %d\n",
					spi->cs_gpio, err);
				goto err_gpio_req;
			}
		}

		spi_set_ctldata(spi, cs);
	}

	pm_runtime_get_sync(&sdd->pdev->dev);

	/* Check if we can provide the requested rate */
	if (!sdd->port_conf->clk_from_cmu) {
		u32 psr, speed;

		/* Max possible */
		speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);

		if (spi->max_speed_hz > speed)
			spi->max_speed_hz = speed;

		psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
		psr &= S3C64XX_SPI_PSR_MASK;
		if (psr == S3C64XX_SPI_PSR_MASK)
			psr--;

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz < speed) {
			if (psr+1 < S3C64XX_SPI_PSR_MASK) {
				psr++;
			} else {
				err = -EINVAL;
				goto setup_exit;
			}
		}

		speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
		if (spi->max_speed_hz >= speed) {
			spi->max_speed_hz = speed;
		} else {
			dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
				spi->max_speed_hz);
			err = -EINVAL;
			goto setup_exit;
		}
	}

	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);
	s3c64xx_spi_set_cs(spi, false);

	return 0;

setup_exit:
	pm_runtime_mark_last_busy(&sdd->pdev->dev);
	pm_runtime_put_autosuspend(&sdd->pdev->dev);
	/* setup() returns with device de-selected */
	s3c64xx_spi_set_cs(spi, false);

	if (gpio_is_valid(spi->cs_gpio))
		gpio_free(spi->cs_gpio);
	spi_set_ctldata(spi, NULL);

err_gpio_req:
	if (spi->dev.of_node)
		kfree(cs);

	return err;
}
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
	struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);

	if (gpio_is_valid(spi->cs_gpio)) {
		gpio_free(spi->cs_gpio);
		if (spi->dev.of_node)
			kfree(cs);
		else {
			/* On non-DT platforms, the SPI core sets
			 * spi->cs_gpio to -ENOENT and .setup()
			 * overrides it with the GPIO pin value
			 * passed using platform data.
			 */
			spi->cs_gpio = -ENOENT;
		}
	}

	spi_set_ctldata(spi, NULL);
}
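
/* Error interrupt handler: report RX/TX FIFO over/underruns and ack them. */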
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
	struct s3c64xx_spi_driver_data *sdd = data;
	struct spi_master *spi = sdd->master;
	unsigned int val, clr = 0;

	val = readl(sdd->regs + S3C64XX_SPI_STATUS);

	if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
		clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
		dev_err(&spi->dev, "RX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
		dev_err(&spi->dev, "RX underrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
		dev_err(&spi->dev, "TX overrun\n");
	}
	if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
		clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
		dev_err(&spi->dev, "TX underrun\n");
	}

	/* Clear the pending irq by setting and then clearing it */
	writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);

	return IRQ_HANDLED;
}
static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd)
{
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	void __iomem *regs = sdd->regs;
	unsigned int val;

	sdd->cur_speed = 0;

	if (sci->no_cs)
		writel(0, sdd->regs + S3C64XX_SPI_CS_REG);
	else if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
		writel(S3C64XX_SPI_CS_SIG_INACT, sdd->regs + S3C64XX_SPI_CS_REG);

	/* Disable Interrupts - we use Polling if not DMA mode */
	writel(0, regs + S3C64XX_SPI_INT_EN);

	if (!sdd->port_conf->clk_from_cmu)
		writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
				regs + S3C64XX_SPI_CLK_CFG);
	writel(0, regs + S3C64XX_SPI_MODE_CFG);
	writel(0, regs + S3C64XX_SPI_PACKET_CNT);

	/* Clear any irq pending bits, should set and clear the bits */
	val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
		S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
		S3C64XX_SPI_PND_TX_OVERRUN_CLR |
		S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
	writel(val, regs + S3C64XX_SPI_PENDING_CLR);
	writel(0, regs + S3C64XX_SPI_PENDING_CLR);

	writel(0, regs + S3C64XX_SPI_SWAP_CFG);

	val = readl(regs + S3C64XX_SPI_MODE_CFG);
	val &= ~S3C64XX_SPI_MODE_4BURST;
	val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
	writel(val, regs + S3C64XX_SPI_MODE_CFG);

	s3c64xx_flush_fifo(sdd);
}
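
/*
 * Parse the controller node: "samsung,spi-src-clk" selects the parent clock
 * for the SPI bus clock, "num-cs" gives the number of chip selects, and
 * "no-cs-readback" sets the no_cs flag checked in s3c64xx_spi_set_cs().
 */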
#ifdef CONFIG_OF
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	struct s3c64xx_spi_info *sci;
	u32 temp;

	sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return ERR_PTR(-ENOMEM);

	if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
		dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
		sci->src_clk_nr = 0;
	} else {
		sci->src_clk_nr = temp;
	}

	if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
		dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
		sci->num_cs = 1;
	} else {
		sci->num_cs = temp;
	}

	sci->no_cs = of_property_read_bool(dev->of_node, "no-cs-readback");

	return sci;
}
#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
	return dev_get_platdata(dev);
}
#endif
static const struct of_device_id s3c64xx_spi_dt_match[];

static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
						struct platform_device *pdev)
{
#ifdef CONFIG_OF
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
		return (struct s3c64xx_spi_port_config *)match->data;
	}
#endif
	return (struct s3c64xx_spi_port_config *)
			 platform_get_device_id(pdev)->driver_data;
}
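
/*
 * Probe: allocate the SPI master, map the registers, grab the clocks and
 * (unless the controller is polling-only) the rx/tx DMA channels, then
 * register with the SPI core under runtime PM.
 */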
static int s3c64xx_spi_probe(struct platform_device *pdev)
{
	struct resource	*mem_res;
	struct s3c64xx_spi_driver_data *sdd;
	struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
	struct spi_master *master;
	int ret, irq;
	char clk_name[16];

	if (!sci && pdev->dev.of_node) {
		sci = s3c64xx_spi_parse_dt(&pdev->dev);
		if (IS_ERR(sci))
			return PTR_ERR(sci);
	}

	if (!sci) {
		dev_err(&pdev->dev, "platform_data missing!\n");
		return -ENODEV;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem_res == NULL) {
		dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
		return -ENXIO;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;
	}

	master = spi_alloc_master(&pdev->dev,
				sizeof(struct s3c64xx_spi_driver_data));
	if (master == NULL) {
		dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, master);

	sdd = spi_master_get_devdata(master);
	sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
	sdd->master = master;
	sdd->cntrlr_info = sci;
	sdd->pdev = pdev;
	sdd->sfr_start = mem_res->start;
	if (pdev->dev.of_node) {
		ret = of_alias_get_id(pdev->dev.of_node, "spi");
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
				ret);
			goto err_deref_master;
		}
		sdd->port_id = ret;
	} else {
		sdd->port_id = pdev->id;
	}

	sdd->cur_bpw = 8;

	sdd->tx_dma.direction = DMA_MEM_TO_DEV;
	sdd->rx_dma.direction = DMA_DEV_TO_MEM;

	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = sdd->port_id;
	master->setup = s3c64xx_spi_setup;
	master->cleanup = s3c64xx_spi_cleanup;
	master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
	master->prepare_message = s3c64xx_spi_prepare_message;
	master->transfer_one = s3c64xx_spi_transfer_one;
	master->num_chipselect = sci->num_cs;
	master->dma_alignment = 8;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
					SPI_BPW_MASK(8);
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->auto_runtime_pm = true;
	if (!is_polling(sdd))
		master->can_dma = s3c64xx_spi_can_dma;

	sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
	if (IS_ERR(sdd->regs)) {
		ret = PTR_ERR(sdd->regs);
		goto err_deref_master;
	}

	if (sci->cfg_gpio && sci->cfg_gpio()) {
		dev_err(&pdev->dev, "Unable to config gpio\n");
		ret = -EBUSY;
		goto err_deref_master;
	}

	/* Setup clocks */
	sdd->clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(sdd->clk)) {
		dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
		ret = PTR_ERR(sdd->clk);
		goto err_deref_master;
	}

	ret = clk_prepare_enable(sdd->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
		goto err_deref_master;
	}

	sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
	sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
	if (IS_ERR(sdd->src_clk)) {
		dev_err(&pdev->dev,
			"Unable to acquire clock '%s'\n", clk_name);
		ret = PTR_ERR(sdd->src_clk);
		goto err_disable_clk;
	}

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
		goto err_disable_clk;
	}

	if (sdd->port_conf->clk_ioclk) {
		sdd->ioclk = devm_clk_get(&pdev->dev, "spi_ioclk");
		if (IS_ERR(sdd->ioclk)) {
			dev_err(&pdev->dev, "Unable to acquire 'ioclk'\n");
			ret = PTR_ERR(sdd->ioclk);
			goto err_disable_src_clk;
		}

		ret = clk_prepare_enable(sdd->ioclk);
		if (ret) {
			dev_err(&pdev->dev, "Couldn't enable clock 'ioclk'\n");
			goto err_disable_src_clk;
		}
	}

	if (!is_polling(sdd)) {
		/* Acquire DMA channels */
		sdd->rx_dma.ch = dma_request_chan(&pdev->dev, "rx");
		if (IS_ERR(sdd->rx_dma.ch)) {
			dev_err(&pdev->dev, "Failed to get RX DMA channel\n");
			ret = PTR_ERR(sdd->rx_dma.ch);
			goto err_disable_io_clk;
		}
		sdd->tx_dma.ch = dma_request_chan(&pdev->dev, "tx");
		if (IS_ERR(sdd->tx_dma.ch)) {
			dev_err(&pdev->dev, "Failed to get TX DMA channel\n");
			ret = PTR_ERR(sdd->tx_dma.ch);
			goto err_release_rx_dma;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* Setup Default Mode */
	s3c64xx_spi_hwinit(sdd);

	spin_lock_init(&sdd->lock);
	init_completion(&sdd->xfer_completion);

	ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
				"spi-s3c64xx", sdd);
	if (ret != 0) {
		dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
			irq, ret);
		goto err_pm_put;
	}

	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret != 0) {
		dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
		goto err_pm_put;
	}

	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
					sdd->port_id, master->num_chipselect);
	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\n",
					mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

err_pm_put:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	if (!is_polling(sdd))
		dma_release_channel(sdd->tx_dma.ch);
err_release_rx_dma:
	if (!is_polling(sdd))
		dma_release_channel(sdd->rx_dma.ch);
err_disable_io_clk:
	clk_disable_unprepare(sdd->ioclk);
err_disable_src_clk:
	clk_disable_unprepare(sdd->src_clk);
err_disable_clk:
	clk_disable_unprepare(sdd->clk);
err_deref_master:
	spi_master_put(master);

	return ret;
}
static int s3c64xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	pm_runtime_get_sync(&pdev->dev);

	writel(0, sdd->regs + S3C64XX_SPI_INT_EN);

	if (!is_polling(sdd)) {
		dma_release_channel(sdd->rx_dma.ch);
		dma_release_channel(sdd->tx_dma.ch);
	}

	clk_disable_unprepare(sdd->ioclk);

	clk_disable_unprepare(sdd->src_clk);

	clk_disable_unprepare(sdd->clk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int s3c64xx_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	int ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret < 0)
		return ret;

	sdd->cur_speed = 0; /* Output Clock is stopped */

	return 0;
}

static int s3c64xx_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
	int ret;

	if (sci->cfg_gpio)
		sci->cfg_gpio();

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int s3c64xx_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);

	clk_disable_unprepare(sdd->clk);
	clk_disable_unprepare(sdd->src_clk);
	clk_disable_unprepare(sdd->ioclk);

	return 0;
}

static int s3c64xx_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
	int ret;

	if (sdd->port_conf->clk_ioclk) {
		ret = clk_prepare_enable(sdd->ioclk);
		if (ret != 0)
			return ret;
	}

	ret = clk_prepare_enable(sdd->src_clk);
	if (ret != 0)
		goto err_disable_ioclk;

	ret = clk_prepare_enable(sdd->clk);
	if (ret != 0)
		goto err_disable_src_clk;

	s3c64xx_spi_hwinit(sdd);

	writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
	       S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
	       sdd->regs + S3C64XX_SPI_INT_EN);

	return 0;

err_disable_src_clk:
	clk_disable_unprepare(sdd->src_clk);
err_disable_ioclk:
	clk_disable_unprepare(sdd->ioclk);

	return ret;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops s3c64xx_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
	SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
			   s3c64xx_spi_runtime_resume, NULL)
};
static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
	.fifo_lvl_mask	= { 0x7f, 0x7F },
	.rx_lvl_offset	= 13,
	.tx_st_done	= 21,
};

static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
};

static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F },
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};

static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};

static struct s3c64xx_spi_port_config exynos5433_spi_port_config = {
	.fifo_lvl_mask	= { 0x1ff, 0x7f, 0x7f, 0x7f, 0x7f, 0x1ff},
	.rx_lvl_offset	= 15,
	.tx_st_done	= 25,
	.high_speed	= true,
	.clk_from_cmu	= true,
	.clk_ioclk	= true,
	.quirks		= S3C64XX_SPI_QUIRK_CS_AUTO,
};
static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
	{
		.name		= "s3c2443-spi",
		.driver_data	= (kernel_ulong_t)&s3c2443_spi_port_config,
	}, {
		.name		= "s3c6410-spi",
		.driver_data	= (kernel_ulong_t)&s3c6410_spi_port_config,
	},
	{ },
};
static const struct of_device_id s3c64xx_spi_dt_match[] = {
	{ .compatible = "samsung,s3c2443-spi",
			.data = (void *)&s3c2443_spi_port_config,
	},
	{ .compatible = "samsung,s3c6410-spi",
			.data = (void *)&s3c6410_spi_port_config,
	},
	{ .compatible = "samsung,s5pv210-spi",
			.data = (void *)&s5pv210_spi_port_config,
	},
	{ .compatible = "samsung,exynos4210-spi",
			.data = (void *)&exynos4_spi_port_config,
	},
	{ .compatible = "samsung,exynos7-spi",
			.data = (void *)&exynos7_spi_port_config,
	},
	{ .compatible = "samsung,exynos5433-spi",
			.data = (void *)&exynos5433_spi_port_config,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
static struct platform_driver s3c64xx_spi_driver = {
	.driver = {
		.name	= "s3c64xx-spi",
		.pm = &s3c64xx_spi_pm,
		.of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
	},
	.probe = s3c64xx_spi_probe,
	.remove = s3c64xx_spi_remove,
	.id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");

module_platform_driver(s3c64xx_spi_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
MODULE_LICENSE("GPL");