// SPDX-License-Identifier: GPL-2.0+
// Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
// Copyright (C) 2008 Juergen Beisert
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/property.h>

#include <linux/platform_data/dma-imx.h>
#define DRIVER_NAME "spi_imx"

static bool use_dma = true;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Enable usage of DMA when available (default)");
#define MXC_RPM_TIMEOUT		2000 /* 2000ms */

#define MXC_CSPIRXDATA		0x00
#define MXC_CSPITXDATA		0x04
#define MXC_CSPICTRL		0x08
#define MXC_CSPIINT		0x0c
#define MXC_RESET		0x1c
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR	(1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE	(1 << 1) /* Transmit FIFO empty interrupt */
#define MXC_INT_RDR	BIT(4) /* Receive data threshold interrupt */

/* The maximum bytes that a sdma BD can transfer. */
#define MAX_SDMA_BD_BYTES	(1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST	512
/* The maximum bytes that IMX53_ECSPI can transfer in slave mode. */
#define MX53_MAX_TRANSFER_BYTES		512
enum spi_imx_devtype {
	IMX1_CSPI,
	IMX21_CSPI,
	IMX27_CSPI,
	IMX31_CSPI,
	IMX35_CSPI,	/* CSPI on all i.mx except above */
	IMX51_ECSPI,	/* ECSPI on i.mx51 */
	IMX53_ECSPI,	/* ECSPI on i.mx53 and later */
};

struct spi_imx_data;
struct spi_imx_devtype_data {
	void (*intctrl)(struct spi_imx_data *, int);
	int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
	int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *,
				struct spi_transfer *);
	void (*trigger)(struct spi_imx_data *);
	int (*rx_available)(struct spi_imx_data *);
	void (*reset)(struct spi_imx_data *);
	void (*setup_wml)(struct spi_imx_data *);
	void (*disable)(struct spi_imx_data *);
	void (*disable_dma)(struct spi_imx_data *);
	bool has_dmamode;
	bool has_slavemode;
	unsigned int fifo_size;
	bool dynamic_burst;
	enum spi_imx_devtype devtype;
};
struct spi_imx_data {
	struct spi_bitbang bitbang;
	struct device *dev;

	struct completion xfer_done;
	void __iomem *base;
	unsigned long base_phys;

	struct clk *clk_per;
	struct clk *clk_ipg;
	unsigned long spi_clk;
	unsigned int spi_bus_clk;

	unsigned int bits_per_word;
	unsigned int spi_drctl;

	unsigned int count, remainder;
	void (*tx)(struct spi_imx_data *);
	void (*rx)(struct spi_imx_data *);
	void *rx_buf;
	const void *tx_buf;
	unsigned int txfifo; /* number of words pushed in tx FIFO */
	unsigned int dynamic_burst;

	/* Slave mode */
	bool slave_mode;
	bool slave_aborted;
	unsigned int slave_burst;

	/* DMA */
	bool usedma;
	u32 wml;
	struct completion dma_rx_completion;
	struct completion dma_tx_completion;

	const struct spi_imx_devtype_data *devtype_data;
};
static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline int is_imx51_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX51_ECSPI;
}

static inline int is_imx53_ecspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX53_ECSPI;
}
#define MXC_SPI_BUF_RX(type)						\
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx)	\
{									\
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);	\
	if (spi_imx->rx_buf) {						\
		*(type *)spi_imx->rx_buf = val;				\
		spi_imx->rx_buf += sizeof(type);			\
	}								\
	spi_imx->remainder -= sizeof(type);				\
}

#define MXC_SPI_BUF_TX(type)						\
static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx)	\
{									\
	type val = 0;							\
	if (spi_imx->tx_buf) {						\
		val = *(type *)spi_imx->tx_buf;				\
		spi_imx->tx_buf += sizeof(type);			\
	}								\
	spi_imx->count -= sizeof(type);					\
	writel(val, spi_imx->base + MXC_CSPITXDATA);			\
}

MXC_SPI_BUF_RX(u8)
MXC_SPI_BUF_RX(u16)
MXC_SPI_BUF_RX(u32)
MXC_SPI_BUF_TX(u8)
MXC_SPI_BUF_TX(u16)
MXC_SPI_BUF_TX(u32)
/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
 * (which is currently not the case in this driver)
 */
static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024};
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
		unsigned int fspi, unsigned int max, unsigned int *fres)
{
	unsigned int i;

	for (i = 2; i < max; i++)
		if (fspi * mxc_clkdivs[i] >= fin)
			break;

	*fres = fin / mxc_clkdivs[i];
	return i;
}
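/*
 * Worked example for spi_imx_clkdiv_1() (illustrative numbers, not from the
 * original source): with fin = 48 MHz and fspi = 1 MHz the loop stops at the
 * first entry where fspi * mxc_clkdivs[i] >= fin, i.e. divider 48 at i = 9,
 * so *fres = 48 MHz / 48 = 1 MHz and the returned register value is 9.
 */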
/* MX1, MX31, MX35, MX51 CSPI */
static unsigned int spi_imx_clkdiv_2(unsigned int fin,
		unsigned int fspi, unsigned int *fres)
{
	int i, div = 4;

	for (i = 0; i < 7; i++) {
		if (fspi * div >= fin)
			goto out;
		div <<= 1;
	}

out:
	*fres = fin / div;
	return i;
}
static int spi_imx_bytes_per_word(const int bits_per_word)
{
	if (bits_per_word <= 8)
		return 1;
	else if (bits_per_word <= 16)
		return 2;
	else
		return 4;
}
static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	if (!use_dma || master->fallback)
		return false;

	if (spi_imx->slave_mode)
		return false;

	if (transfer->len < spi_imx->devtype_data->fifo_size)
		return false;

	spi_imx->dynamic_burst = 0;

	return true;
}
#define MX51_ECSPI_CTRL		0x08
#define MX51_ECSPI_CTRL_ENABLE		(1 << 0)
#define MX51_ECSPI_CTRL_XCH		(1 << 2)
#define MX51_ECSPI_CTRL_SMC		(1 << 3)
#define MX51_ECSPI_CTRL_MODE_MASK	(0xf << 4)
#define MX51_ECSPI_CTRL_DRCTL(drctl)	((drctl) << 16)
#define MX51_ECSPI_CTRL_POSTDIV_OFFSET	8
#define MX51_ECSPI_CTRL_PREDIV_OFFSET	12
#define MX51_ECSPI_CTRL_CS(cs)		((cs) << 18)
#define MX51_ECSPI_CTRL_BL_OFFSET	20
#define MX51_ECSPI_CTRL_BL_MASK		(0xfff << 20)

#define MX51_ECSPI_CONFIG	0x0c
#define MX51_ECSPI_CONFIG_SCLKPHA(cs)	(1 << ((cs) + 0))
#define MX51_ECSPI_CONFIG_SCLKPOL(cs)	(1 << ((cs) + 4))
#define MX51_ECSPI_CONFIG_SBBCTRL(cs)	(1 << ((cs) + 8))
#define MX51_ECSPI_CONFIG_SSBPOL(cs)	(1 << ((cs) + 12))
#define MX51_ECSPI_CONFIG_SCLKCTL(cs)	(1 << ((cs) + 20))

#define MX51_ECSPI_INT		0x10
#define MX51_ECSPI_INT_TEEN		(1 << 0)
#define MX51_ECSPI_INT_RREN		(1 << 3)
#define MX51_ECSPI_INT_RDREN		(1 << 4)

#define MX51_ECSPI_DMA		0x14
#define MX51_ECSPI_DMA_TX_WML(wml)	((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml)	(((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml)	(((wml) & 0x3f) << 24)

#define MX51_ECSPI_DMA_TEDEN		(1 << 7)
#define MX51_ECSPI_DMA_RXDEN		(1 << 23)
#define MX51_ECSPI_DMA_RXTDEN		(1 << 31)

#define MX51_ECSPI_STAT		0x18
#define MX51_ECSPI_STAT_RR		(1 << 3)

#define MX51_ECSPI_TESTREG	0x20
#define MX51_ECSPI_TESTREG_LBC	BIT(31)
static void spi_imx_buf_rx_swap_u32(struct spi_imx_data *spi_imx)
{
	unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA);
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->rx_buf) {
#ifdef __LITTLE_ENDIAN
		bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);
		if (bytes_per_word == 1)
			val = cpu_to_be32(val);
		else if (bytes_per_word == 2)
			val = (val << 16) | (val >> 16);
#endif
		*(u32 *)spi_imx->rx_buf = val;
		spi_imx->rx_buf += sizeof(u32);
	}

	spi_imx->remainder -= sizeof(u32);
}
static void spi_imx_buf_rx_swap(struct spi_imx_data *spi_imx)
{
	unsigned int unaligned;
	u32 val;

	unaligned = spi_imx->remainder % 4;

	if (!unaligned) {
		spi_imx_buf_rx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_rx_u16(spi_imx);
		return;
	}

	val = readl(spi_imx->base + MXC_CSPIRXDATA);

	while (unaligned--) {
		if (spi_imx->rx_buf) {
			*(u8 *)spi_imx->rx_buf = (val >> (8 * unaligned)) & 0xff;
			spi_imx->rx_buf++;
		}
		spi_imx->remainder--;
	}
}
static void spi_imx_buf_tx_swap_u32(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
#ifdef __LITTLE_ENDIAN
	unsigned int bytes_per_word;
#endif

	if (spi_imx->tx_buf) {
		val = *(u32 *)spi_imx->tx_buf;
		spi_imx->tx_buf += sizeof(u32);
	}

	spi_imx->count -= sizeof(u32);
#ifdef __LITTLE_ENDIAN
	bytes_per_word = spi_imx_bytes_per_word(spi_imx->bits_per_word);

	if (bytes_per_word == 1)
		val = cpu_to_be32(val);
	else if (bytes_per_word == 2)
		val = (val << 16) | (val >> 16);
#endif

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx)
{
	unsigned int unaligned;
	u32 val = 0;

	unaligned = spi_imx->count % 4;

	if (!unaligned) {
		spi_imx_buf_tx_swap_u32(spi_imx);
		return;
	}

	if (spi_imx_bytes_per_word(spi_imx->bits_per_word) == 2) {
		spi_imx_buf_tx_u16(spi_imx);
		return;
	}

	while (unaligned--) {
		if (spi_imx->tx_buf) {
			val |= *(u8 *)spi_imx->tx_buf << (8 * unaligned);
			spi_imx->tx_buf++;
		}
		spi_imx->count--;
	}

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA));

	if (spi_imx->rx_buf) {
		int n_bytes = spi_imx->slave_burst % sizeof(val);

		if (!n_bytes)
			n_bytes = sizeof(val);

		memcpy(spi_imx->rx_buf,
		       ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes);

		spi_imx->rx_buf += n_bytes;
		spi_imx->slave_burst -= n_bytes;
	}

	spi_imx->remainder -= sizeof(u32);
}
static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx)
{
	u32 val = 0;
	int n_bytes = spi_imx->count % sizeof(val);

	if (!n_bytes)
		n_bytes = sizeof(val);

	if (spi_imx->tx_buf) {
		memcpy(((u8 *)&val) + sizeof(val) - n_bytes,
		       spi_imx->tx_buf, n_bytes);
		val = cpu_to_be32(val);
		spi_imx->tx_buf += n_bytes;
	}

	spi_imx->count -= n_bytes;

	writel(val, spi_imx->base + MXC_CSPITXDATA);
}
static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
				      unsigned int fspi, unsigned int *fres)
{
	/*
	 * there are two 4-bit dividers, the pre-divider divides by
	 * $pre, the post-divider by 2^$post
	 */
	unsigned int pre, post;
	unsigned int fin = spi_imx->spi_clk;

	if (unlikely(fspi > fin))
		return 0;

	post = fls(fin) - fls(fspi);
	if (fin > fspi << post)
		post++;

	/* now we have: (fin <= fspi << post) with post being minimal */

	post = max(4U, post) - 4;
	if (unlikely(post > 0xf)) {
		dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
			fspi, fin);
		return 0;
	}

	pre = DIV_ROUND_UP(fin, fspi << post) - 1;

	dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
			__func__, fin, fspi, post, pre);

	/* Resulting frequency for the SCLK line. */
	*fres = (fin / (pre + 1)) >> post;

	return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
		(post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
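/*
 * Worked example (illustrative numbers, not from the original source): with
 * fin = 64 MHz and fspi = 5 MHz, fls(fin) - fls(fspi) = 26 - 23 = 3; since
 * 64 MHz > 5 MHz << 3, post becomes 4 and is then reduced to 0 by
 * "post = max(4U, post) - 4", leaving the whole division to the pre-divider:
 * pre = DIV_ROUND_UP(64, 5) - 1 = 12, so *fres reports 64 MHz / 13, roughly
 * 4.92 MHz on the SCLK line.
 */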
static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX51_ECSPI_INT_TEEN;

	if (enable & MXC_INT_RR)
		val |= MX51_ECSPI_INT_RREN;

	if (enable & MXC_INT_RDR)
		val |= MX51_ECSPI_INT_RDREN;

	writel(val, spi_imx->base + MX51_ECSPI_INT);
}
static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
	u32 reg;

	reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
	reg |= MX51_ECSPI_CTRL_XCH;
	writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
static void mx51_disable_dma(struct spi_imx_data *spi_imx)
{
	writel(0, spi_imx->base + MX51_ECSPI_DMA);
}
static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
{
	u32 ctrl;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	ctrl &= ~MX51_ECSPI_CTRL_ENABLE;
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}
static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
				      struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
	u32 testreg;
	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);

	/* set Master or Slave mode */
	if (spi_imx->slave_mode)
		ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK;
	else
		ctrl |= MX51_ECSPI_CTRL_MODE_MASK;

	/*
	 * Enable SPI_RDY handling (falling edge/level triggered).
	 */
	if (spi->mode & SPI_READY)
		ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);

	/* set chip select to use */
	ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);

	/*
	 * The ctrl register must be written first, with the EN bit set;
	 * other registers must not be written to before that.
	 */
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		testreg |= MX51_ECSPI_TESTREG_LBC;
	else
		testreg &= ~MX51_ECSPI_TESTREG_LBC;
	writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);

	/*
	 * eCSPI burst completion by the Chip Select signal in Slave mode
	 * is not functional on the i.MX53 SoC, so configure the SPI burst
	 * to complete when BURST_LENGTH + 1 bits have been received.
	 */
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);
	else
		cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select);

	if (spi->mode & SPI_CPHA)
		cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select);

	if (spi->mode & SPI_CPOL) {
		cfg |= MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg |= MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	} else {
		cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
		cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
	}

	if (spi->mode & SPI_CS_HIGH)
		cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
	else
		cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);

	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);

	return 0;
}
static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
				       struct spi_device *spi,
				       struct spi_transfer *t)
{
	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	u32 clk = t->speed_hz, delay;

	/* Clear BL field and set the right value */
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
		ctrl |= (spi_imx->slave_burst * 8 - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;
	else
		ctrl |= (spi_imx->bits_per_word - 1)
			<< MX51_ECSPI_CTRL_BL_OFFSET;

	/* set clock speed */
	ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
		  0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
	ctrl |= mx51_ecspi_clkdiv(spi_imx, t->speed_hz, &clk);
	spi_imx->spi_bus_clk = clk;

	if (spi_imx->usedma)
		ctrl |= MX51_ECSPI_CTRL_SMC;

	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);

	/*
	 * Wait until the changes in the configuration register CONFIGREG
	 * propagate into the hardware. It takes exactly one tick of the
	 * SCLK clock, but we will wait two SCLK ticks just to be sure. The
	 * effect of the delay it takes for the hardware to apply changes
	 * is noticeable if the SCLK clock runs very slowly. In such a case,
	 * if the polarity of SCLK should be inverted, the GPIO chip select
	 * might be asserted before the SCLK polarity changes, which would
	 * disrupt the SPI communication as the device on the other end would
	 * consider the change of SCLK polarity as a clock tick already.
	 */
	delay = (2 * 1000000) / clk;
	if (likely(delay < 10))	/* SCLK is faster than 100 kHz */
		udelay(delay);
	else			/* SCLK is _very_ slow */
		usleep_range(delay, delay + 10);

	return 0;
}
static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
	/*
	 * Configure the DMA register: set up the watermark
	 * and enable DMA request.
	 */
	writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
		MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
		MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
		MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
}
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}

static void mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (mx51_ecspi_rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
#define MX31_INTREG_TEEN	(1 << 0)
#define MX31_INTREG_RREN	(1 << 3)

#define MX31_CSPICTRL_ENABLE	(1 << 0)
#define MX31_CSPICTRL_MASTER	(1 << 1)
#define MX31_CSPICTRL_XCH	(1 << 2)
#define MX31_CSPICTRL_SMC	(1 << 3)
#define MX31_CSPICTRL_POL	(1 << 4)
#define MX31_CSPICTRL_PHA	(1 << 5)
#define MX31_CSPICTRL_SSCTL	(1 << 6)
#define MX31_CSPICTRL_SSPOL	(1 << 7)
#define MX31_CSPICTRL_BC_SHIFT	8
#define MX35_CSPICTRL_BL_SHIFT	20
#define MX31_CSPICTRL_CS_SHIFT	24
#define MX35_CSPICTRL_CS_SHIFT	12
#define MX31_CSPICTRL_DR_SHIFT	16

#define MX31_CSPI_DMAREG	0x10
#define MX31_DMAREG_RH_DEN	(1<<4)
#define MX31_DMAREG_TH_DEN	(1<<1)

#define MX31_CSPISTATUS		0x14
#define MX31_STATUS_RR		(1 << 3)

#define MX31_CSPI_TESTREG	0x1C
#define MX31_TEST_LBC		(1 << 14)
/* These functions also work for the i.MX35, but be aware that
 * the i.MX35 has a slightly different register layout for bits
 * we do not use here.
 */
static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX31_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX31_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}
static void mx31_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX31_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}
static int mx31_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
		MX31_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	if (is_imx35_cspi(spi_imx)) {
		reg |= (spi_imx->bits_per_word - 1) << MX35_CSPICTRL_BL_SHIFT;
		reg |= MX31_CSPICTRL_SSCTL;
	} else {
		reg |= (spi_imx->bits_per_word - 1) << MX31_CSPICTRL_BC_SHIFT;
	}

	if (spi->mode & SPI_CPHA)
		reg |= MX31_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX31_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX31_CSPICTRL_SSPOL;

	reg |= (spi->chip_select) <<
		(is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
					  MX31_CSPICTRL_CS_SHIFT);

	if (spi_imx->usedma)
		reg |= MX31_CSPICTRL_SMC;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	reg = readl(spi_imx->base + MX31_CSPI_TESTREG);
	if (spi->mode & SPI_LOOP)
		reg |= MX31_TEST_LBC;
	else
		reg &= ~MX31_TEST_LBC;
	writel(reg, spi_imx->base + MX31_CSPI_TESTREG);

	if (spi_imx->usedma) {
		/*
		 * configure DMA requests when RXFIFO is half full and
		 * when TXFIFO is half empty
		 */
		writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
			spi_imx->base + MX31_CSPI_DMAREG);
	}

	return 0;
}
static int mx31_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}

static void mx31_reset(struct spi_imx_data *spi_imx)
{
	/* drain receive buffer */
	while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
		readl(spi_imx->base + MXC_CSPIRXDATA);
}
#define MX21_INTREG_RR		(1 << 4)
#define MX21_INTREG_TEEN	(1 << 9)
#define MX21_INTREG_RREN	(1 << 13)

#define MX21_CSPICTRL_POL	(1 << 5)
#define MX21_CSPICTRL_PHA	(1 << 6)
#define MX21_CSPICTRL_SSPOL	(1 << 8)
#define MX21_CSPICTRL_XCH	(1 << 9)
#define MX21_CSPICTRL_ENABLE	(1 << 10)
#define MX21_CSPICTRL_MASTER	(1 << 11)
#define MX21_CSPICTRL_DR_SHIFT	14
#define MX21_CSPICTRL_CS_SHIFT	19
static void mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX21_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX21_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}
static void mx21_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX21_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}
static int mx21_prepare_message(struct spi_imx_data *spi_imx,
				struct spi_message *msg)
{
	return 0;
}
static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
	unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
	unsigned int clk;

	reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, t->speed_hz, max, &clk)
		<< MX21_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX21_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX21_CSPICTRL_POL;
	if (spi->mode & SPI_CS_HIGH)
		reg |= MX21_CSPICTRL_SSPOL;

	reg |= spi->chip_select << MX21_CSPICTRL_CS_SHIFT;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
static int mx21_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}

static void mx21_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
#define MX1_INTREG_RR		(1 << 3)
#define MX1_INTREG_TEEN		(1 << 8)
#define MX1_INTREG_RREN		(1 << 11)

#define MX1_CSPICTRL_POL	(1 << 4)
#define MX1_CSPICTRL_PHA	(1 << 5)
#define MX1_CSPICTRL_XCH	(1 << 8)
#define MX1_CSPICTRL_ENABLE	(1 << 9)
#define MX1_CSPICTRL_MASTER	(1 << 10)
#define MX1_CSPICTRL_DR_SHIFT	13
static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
{
	unsigned int val = 0;

	if (enable & MXC_INT_TE)
		val |= MX1_INTREG_TEEN;
	if (enable & MXC_INT_RR)
		val |= MX1_INTREG_RREN;

	writel(val, spi_imx->base + MXC_CSPIINT);
}
static void mx1_trigger(struct spi_imx_data *spi_imx)
{
	unsigned int reg;

	reg = readl(spi_imx->base + MXC_CSPICTRL);
	reg |= MX1_CSPICTRL_XCH;
	writel(reg, spi_imx->base + MXC_CSPICTRL);
}
static int mx1_prepare_message(struct spi_imx_data *spi_imx,
			       struct spi_message *msg)
{
	return 0;
}
static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
				struct spi_device *spi,
				struct spi_transfer *t)
{
	unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
	unsigned int clk;

	reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
		MX1_CSPICTRL_DR_SHIFT;
	spi_imx->spi_bus_clk = clk;

	reg |= spi_imx->bits_per_word - 1;

	if (spi->mode & SPI_CPHA)
		reg |= MX1_CSPICTRL_PHA;
	if (spi->mode & SPI_CPOL)
		reg |= MX1_CSPICTRL_POL;

	writel(reg, spi_imx->base + MXC_CSPICTRL);

	return 0;
}
static int mx1_rx_available(struct spi_imx_data *spi_imx)
{
	return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
}

static void mx1_reset(struct spi_imx_data *spi_imx)
{
	writel(1, spi_imx->base + MXC_RESET);
}
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
	.intctrl = mx1_intctrl,
	.prepare_message = mx1_prepare_message,
	.prepare_transfer = mx1_prepare_transfer,
	.trigger = mx1_trigger,
	.rx_available = mx1_rx_available,
	.reset = mx1_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX1_CSPI,
};

static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX21_CSPI,
};

static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
	/* i.mx27 cspi shares its functions with the i.mx21 one */
	.intctrl = mx21_intctrl,
	.prepare_message = mx21_prepare_message,
	.prepare_transfer = mx21_prepare_transfer,
	.trigger = mx21_trigger,
	.rx_available = mx21_rx_available,
	.reset = mx21_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX27_CSPI,
};

static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = false,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX31_CSPI,
};

static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
	/* i.mx35 and later cspi shares functions with the i.mx31 one */
	.intctrl = mx31_intctrl,
	.prepare_message = mx31_prepare_message,
	.prepare_transfer = mx31_prepare_transfer,
	.trigger = mx31_trigger,
	.rx_available = mx31_rx_available,
	.reset = mx31_reset,
	.fifo_size = 8,
	.has_dmamode = true,
	.dynamic_burst = false,
	.has_slavemode = false,
	.devtype = IMX35_CSPI,
};

static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.reset = mx51_ecspi_reset,
	.setup_wml = mx51_setup_wml,
	.disable_dma = mx51_disable_dma,
	.fifo_size = 64,
	.has_dmamode = true,
	.dynamic_burst = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX51_ECSPI,
};

static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
	.intctrl = mx51_ecspi_intctrl,
	.prepare_message = mx51_ecspi_prepare_message,
	.prepare_transfer = mx51_ecspi_prepare_transfer,
	.trigger = mx51_ecspi_trigger,
	.rx_available = mx51_ecspi_rx_available,
	.disable_dma = mx51_disable_dma,
	.reset = mx51_ecspi_reset,
	.fifo_size = 64,
	.has_dmamode = true,
	.has_slavemode = true,
	.disable = mx51_ecspi_disable,
	.devtype = IMX53_ECSPI,
};
static const struct of_device_id spi_imx_dt_ids[] = {
	{ .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
	{ .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
	{ .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
	{ .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
	{ .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
	{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
	{ .compatible = "fsl,imx53-ecspi", .data = &imx53_ecspi_devtype_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
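/*
 * Illustrative device tree fragment (assumed example values, not taken from
 * the original source) showing how one of the compatibles above binds to this
 * driver; the "ipg"/"per" clock names and "rx"/"tx" DMA names match what the
 * driver requests below:
 *
 *	ecspi1: spi@2008000 {
 *		compatible = "fsl,imx51-ecspi";
 *		reg = <0x02008000 0x4000>;
 *		interrupts = <0 31 4>;
 *		clocks = <&clks 112>, <&clks 112>;
 *		clock-names = "ipg", "per";
 *		dmas = <&sdma 3 7 1>, <&sdma 4 7 2>;
 *		dma-names = "rx", "tx";
 *	};
 */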
static void spi_imx_set_burst_len(struct spi_imx_data *spi_imx, int n_bits)
{
	u32 ctrl;

	ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
	ctrl |= ((n_bits - 1) << MX51_ECSPI_CTRL_BL_OFFSET);
	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
	unsigned int burst_len, fifo_words;

	if (spi_imx->dynamic_burst)
		fifo_words = 4;
	else
		fifo_words = spi_imx_bytes_per_word(spi_imx->bits_per_word);
	/*
	 * Reload the FIFO when the number of remaining bytes in the current
	 * burst is 0. This only applies when bits_per_word is a multiple
	 * of 8.
	 */
	if (!spi_imx->remainder) {
		if (spi_imx->dynamic_burst) {

			/* We need to deal with unaligned data first */
			burst_len = spi_imx->count % MX51_ECSPI_CTRL_MAX_BURST;

			if (!burst_len)
				burst_len = MX51_ECSPI_CTRL_MAX_BURST;

			spi_imx_set_burst_len(spi_imx, burst_len * 8);

			spi_imx->remainder = burst_len;
		} else {
			spi_imx->remainder = fifo_words;
		}
	}

	while (spi_imx->txfifo < spi_imx->devtype_data->fifo_size) {
		if (!spi_imx->count)
			break;
		if (spi_imx->dynamic_burst &&
		    spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
						    fifo_words))
			break;
		spi_imx->tx(spi_imx);
		spi_imx->txfifo++;
	}

	if (!spi_imx->slave_mode)
		spi_imx->devtype_data->trigger(spi_imx);
}
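/*
 * Illustrative example (not from the original source): with dynamic_burst
 * enabled and count = 600 bytes, the first pass programs a burst of
 * 600 % 512 = 88 bytes (704 bits) to consume the unaligned tail, and every
 * later pass reloads a full MX51_ECSPI_CTRL_MAX_BURST of 512 bytes.
 */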
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
	struct spi_imx_data *spi_imx = dev_id;

	while (spi_imx->txfifo &&
	       spi_imx->devtype_data->rx_available(spi_imx)) {
		spi_imx->rx(spi_imx);
		spi_imx->txfifo--;
	}

	if (spi_imx->count) {
		spi_imx_push(spi_imx);
		return IRQ_HANDLED;
	}

	if (spi_imx->txfifo) {
		/* No data left to push, but still waiting for rx data,
		 * enable receive data available interrupt.
		 */
		spi_imx->devtype_data->intctrl(
				spi_imx, MXC_INT_RR);
		return IRQ_HANDLED;
	}

	spi_imx->devtype_data->intctrl(spi_imx, 0);
	complete(&spi_imx->xfer_done);

	return IRQ_HANDLED;
}
static int spi_imx_dma_configure(struct spi_master *master)
{
	int ret;
	enum dma_slave_buswidth buswidth;
	struct dma_slave_config rx = {}, tx = {};
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	switch (spi_imx_bytes_per_word(spi_imx->bits_per_word)) {
	case 4:
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	case 2:
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case 1:
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	default:
		return -EINVAL;
	}

	tx.direction = DMA_MEM_TO_DEV;
	tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
	tx.dst_addr_width = buswidth;
	tx.dst_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_tx, &tx);
	if (ret) {
		dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
		return ret;
	}

	rx.direction = DMA_DEV_TO_MEM;
	rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
	rx.src_addr_width = buswidth;
	rx.src_maxburst = spi_imx->wml;
	ret = dmaengine_slave_config(master->dma_rx, &rx);
	if (ret) {
		dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
		return ret;
	}

	return 0;
}
static int spi_imx_setupxfer(struct spi_device *spi,
			     struct spi_transfer *t)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	if (!t)
		return 0;

	spi_imx->bits_per_word = t->bits_per_word;

	/*
	 * Initialize the functions for transfer. To transfer non byte-aligned
	 * words, we have to use multiple word-size bursts; we can't use
	 * dynamic_burst in that case.
	 */
	if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode &&
	    (spi_imx->bits_per_word == 8 ||
	     spi_imx->bits_per_word == 16 ||
	     spi_imx->bits_per_word == 32)) {

		spi_imx->rx = spi_imx_buf_rx_swap;
		spi_imx->tx = spi_imx_buf_tx_swap;
		spi_imx->dynamic_burst = 1;

	} else {
		if (spi_imx->bits_per_word <= 8) {
			spi_imx->rx = spi_imx_buf_rx_u8;
			spi_imx->tx = spi_imx_buf_tx_u8;
		} else if (spi_imx->bits_per_word <= 16) {
			spi_imx->rx = spi_imx_buf_rx_u16;
			spi_imx->tx = spi_imx_buf_tx_u16;
		} else {
			spi_imx->rx = spi_imx_buf_rx_u32;
			spi_imx->tx = spi_imx_buf_tx_u32;
		}
		spi_imx->dynamic_burst = 0;
	}

	if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
		spi_imx->usedma = true;
	else
		spi_imx->usedma = false;

	if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
		spi_imx->rx = mx53_ecspi_rx_slave;
		spi_imx->tx = mx53_ecspi_tx_slave;
		spi_imx->slave_burst = t->len;
	}

	spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t);

	return 0;
}
static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
{
	struct spi_master *master = spi_imx->bitbang.master;

	if (master->dma_rx) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}

	if (master->dma_tx) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}
}
static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
			     struct spi_master *master)
{
	int ret;

	/* use pio mode for i.mx6dl chip TKT238285 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		return 0;

	spi_imx->wml = spi_imx->devtype_data->fifo_size / 2;

	/* Prepare for TX DMA: */
	master->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
		master->dma_tx = NULL;
		goto err;
	}

	/* Prepare for RX DMA: */
	master->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(master->dma_rx)) {
		ret = PTR_ERR(master->dma_rx);
		dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
		master->dma_rx = NULL;
		goto err;
	}

	init_completion(&spi_imx->dma_rx_completion);
	init_completion(&spi_imx->dma_tx_completion);
	master->can_dma = spi_imx_can_dma;
	master->max_dma_len = MAX_SDMA_BD_BYTES;
	spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
					 SPI_MASTER_MUST_TX;

	return 0;
err:
	spi_imx_sdma_exit(spi_imx);
	return ret;
}
static void spi_imx_dma_rx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_rx_completion);
}

static void spi_imx_dma_tx_callback(void *cookie)
{
	struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;

	complete(&spi_imx->dma_tx_completion);
}
static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
{
	unsigned long timeout = 0;

	/* Time with actual data transfer and CS change delay related to HW */
	timeout = (8 + 4) * size / spi_imx->spi_bus_clk;

	/* Add extra second for scheduler related activities */
	timeout += 1;

	/* Double the calculated timeout */
	return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
}
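/*
 * Illustrative example (not from the original source): for a 4096 byte
 * transfer at spi_bus_clk = 1 MHz, (8 + 4) * 4096 / 1000000 truncates to 0,
 * so the extra second is what actually guards the wait; doubling and
 * converting then gives msecs_to_jiffies(2000), i.e. a 2 second timeout.
 */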
static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
				struct spi_transfer *transfer)
{
	struct dma_async_tx_descriptor *desc_tx, *desc_rx;
	unsigned long transfer_timeout;
	unsigned long timeout;
	struct spi_master *master = spi_imx->bitbang.master;
	struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
	struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
	unsigned int bytes_per_word, i;
	int ret;

	/* Get the right burst length from the last sg to ensure no tail data */
	bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
	for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
		if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
			break;
	}
	/* Use 1 as wml if no suitable burst length was found */
	if (i == 0)
		i = 1;

	spi_imx->wml = i;

	ret = spi_imx_dma_configure(master);
	if (ret)
		goto dma_failure_no_start;

	if (!spi_imx->devtype_data->setup_wml) {
		dev_err(spi_imx->dev, "No setup_wml()?\n");
		ret = -EINVAL;
		goto dma_failure_no_start;
	}
	spi_imx->devtype_data->setup_wml(spi_imx);

	/*
	 * The TX DMA setup starts the transfer, so make sure RX is configured
	 * before TX.
	 */
	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
				rx->sgl, rx->nents, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_rx) {
		ret = -EINVAL;
		goto dma_failure_no_start;
	}

	desc_rx->callback = spi_imx_dma_rx_callback;
	desc_rx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_rx);
	reinit_completion(&spi_imx->dma_rx_completion);
	dma_async_issue_pending(master->dma_rx);

	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
				tx->sgl, tx->nents, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc_tx) {
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -EINVAL;
	}

	desc_tx->callback = spi_imx_dma_tx_callback;
	desc_tx->callback_param = (void *)spi_imx;
	dmaengine_submit(desc_tx);
	reinit_completion(&spi_imx->dma_tx_completion);
	dma_async_issue_pending(master->dma_tx);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	/* Wait for SDMA to finish the data transfer. */
	timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
		dmaengine_terminate_all(master->dma_tx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&master->dev, "I/O Error in DMA RX\n");
		spi_imx->devtype_data->reset(spi_imx);
		dmaengine_terminate_all(master->dma_rx);
		return -ETIMEDOUT;
	}

	return transfer->len;
/* fallback to pio */
dma_failure_no_start:
	transfer->error |= SPI_TRANS_FAIL_NO_START;
	return 0;
}
static int spi_imx_pio_transfer(struct spi_device *spi,
				struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	unsigned long transfer_timeout;
	unsigned long timeout;

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	reinit_completion(&spi_imx->xfer_done);

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);

	transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);

	timeout = wait_for_completion_timeout(&spi_imx->xfer_done,
					      transfer_timeout);
	if (!timeout) {
		dev_err(&spi->dev, "I/O Error in PIO\n");
		spi_imx->devtype_data->reset(spi_imx);
		return -ETIMEDOUT;
	}

	return transfer->len;
}
static int spi_imx_pio_transfer_slave(struct spi_device *spi,
				      struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
	int ret = transfer->len;

	if (is_imx53_ecspi(spi_imx) &&
	    transfer->len > MX53_MAX_TRANSFER_BYTES) {
		dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n",
			MX53_MAX_TRANSFER_BYTES);
		return -EMSGSIZE;
	}

	spi_imx->tx_buf = transfer->tx_buf;
	spi_imx->rx_buf = transfer->rx_buf;
	spi_imx->count = transfer->len;
	spi_imx->txfifo = 0;
	spi_imx->remainder = 0;

	reinit_completion(&spi_imx->xfer_done);
	spi_imx->slave_aborted = false;

	spi_imx_push(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR);

	if (wait_for_completion_interruptible(&spi_imx->xfer_done) ||
	    spi_imx->slave_aborted) {
		dev_dbg(&spi->dev, "interrupted\n");
		ret = -EINTR;
	}

	/* ecspi has a HW issue when working in Slave mode:
	 * after 64 words are written to the TXFIFO, even once the TXFIFO
	 * becomes empty, ECSPI_TXDATA keeps shifting out the last word,
	 * so we have to disable ECSPI when in slave mode after the
	 * transfer completes.
	 */
	if (spi_imx->devtype_data->disable)
		spi_imx->devtype_data->disable(spi_imx);

	return ret;
}
static int spi_imx_transfer(struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);

	transfer->effective_speed_hz = spi_imx->spi_bus_clk;

	/* flush rxfifo before transfer */
	while (spi_imx->devtype_data->rx_available(spi_imx))
		readl(spi_imx->base + MXC_CSPIRXDATA);

	if (spi_imx->slave_mode)
		return spi_imx_pio_transfer_slave(spi, transfer);

	if (spi_imx->usedma)
		return spi_imx_dma_transfer(spi_imx, transfer);

	return spi_imx_pio_transfer(spi, transfer);
}
static int spi_imx_setup(struct spi_device *spi)
{
	dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
		 spi->mode, spi->bits_per_word, spi->max_speed_hz);

	return 0;
}

static void spi_imx_cleanup(struct spi_device *spi)
{
}
static int
spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(spi_imx->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(spi_imx->dev);
		dev_err(spi_imx->dev, "failed to enable clock\n");
		return ret;
	}

	ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
	if (ret) {
		pm_runtime_mark_last_busy(spi_imx->dev);
		pm_runtime_put_autosuspend(spi_imx->dev);
	}

	return ret;
}

static int
spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	pm_runtime_mark_last_busy(spi_imx->dev);
	pm_runtime_put_autosuspend(spi_imx->dev);
	return 0;
}
static int spi_imx_slave_abort(struct spi_master *master)
{
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);

	spi_imx->slave_aborted = true;
	complete(&spi_imx->xfer_done);

	return 0;
}
static int spi_imx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(spi_imx_dt_ids, &pdev->dev);
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	struct resource *res;
	int ret, irq, spi_drctl;
	const struct spi_imx_devtype_data *devtype_data = of_id->data;
	bool slave_mode;
	u32 val;

	slave_mode = devtype_data->has_slavemode &&
		     of_property_read_bool(np, "spi-slave");
	if (slave_mode)
		master = spi_alloc_slave(&pdev->dev,
					 sizeof(struct spi_imx_data));
	else
		master = spi_alloc_master(&pdev->dev,
					  sizeof(struct spi_imx_data));
	if (!master)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,spi-rdy-drctl", &spi_drctl);
	if ((ret < 0) || (spi_drctl >= 0x3)) {
		/* '11' is reserved */
		spi_drctl = 0;
	}

	platform_set_drvdata(pdev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->bus_num = np ? -1 : pdev->id;
	master->use_gpio_descriptors = true;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;
	spi_imx->dev = &pdev->dev;
	spi_imx->slave_mode = slave_mode;

	spi_imx->devtype_data = devtype_data;

	/*
	 * Get the number of chip selects from device properties. This can
	 * come from the device tree or from board files; if it is not
	 * defined, a default of 3 chip selects is used, as all the legacy
	 * board files have <= 3 chip selects.
	 */
	if (!device_property_read_u32(&pdev->dev, "num-cs", &val))
		master->num_chipselect = val;
	else
		master->num_chipselect = 3;

	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
					     | SPI_NO_CS;
	if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) ||
	    is_imx53_ecspi(spi_imx))
		spi_imx->bitbang.master->mode_bits |= SPI_LOOP | SPI_READY;

	spi_imx->spi_drctl = spi_drctl;

	init_completion(&spi_imx->xfer_done);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(spi_imx->base)) {
		ret = PTR_ERR(spi_imx->base);
		goto out_master_put;
	}
	spi_imx->base_phys = res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_master_put;
	}

	ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
			       dev_name(&pdev->dev), spi_imx);
	if (ret) {
		dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
		goto out_master_put;
	}

	spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_master_put;
	}

	spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_master_put;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_master_put;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(spi_imx->dev);
	pm_runtime_get_noresume(spi_imx->dev);
	pm_runtime_set_active(spi_imx->dev);
	pm_runtime_enable(spi_imx->dev);

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
	/*
	 * Only validated on i.mx35 and i.mx6 now, can remove the constraint
	 * if validated on other chips.
	 */
	if (spi_imx->devtype_data->has_dmamode) {
		ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
		if (ret == -EPROBE_DEFER)
			goto out_runtime_pm_put;
		if (ret < 0)
			dev_dbg(&pdev->dev, "dma setup error %d, use pio\n",
				ret);
	}

	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = pdev->dev.of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
		goto out_bitbang_start;
	}

	pm_runtime_mark_last_busy(spi_imx->dev);
	pm_runtime_put_autosuspend(spi_imx->dev);

	return ret;

out_bitbang_start:
	if (spi_imx->devtype_data->has_dmamode)
		spi_imx_sdma_exit(spi_imx);
out_runtime_pm_put:
	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_disable(spi_imx->dev);

	clk_disable_unprepare(spi_imx->clk_ipg);
out_put_per:
	clk_disable_unprepare(spi_imx->clk_per);
out_master_put:
	spi_master_put(master);

	return ret;
}
static int spi_imx_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
	int ret;

	spi_bitbang_stop(&spi_imx->bitbang);

	ret = pm_runtime_get_sync(spi_imx->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(spi_imx->dev);
		dev_err(spi_imx->dev, "failed to enable clock\n");
		return ret;
	}

	writel(0, spi_imx->base + MXC_CSPICTRL);

	pm_runtime_dont_use_autosuspend(spi_imx->dev);
	pm_runtime_put_sync(spi_imx->dev);
	pm_runtime_disable(spi_imx->dev);

	spi_imx_sdma_exit(spi_imx);
	spi_master_put(master);

	return 0;
}
static int __maybe_unused spi_imx_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct spi_imx_data *spi_imx;
	int ret;

	spi_imx = spi_master_get_devdata(master);

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		return ret;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret) {
		clk_disable_unprepare(spi_imx->clk_per);
		return ret;
	}

	return 0;
}

static int __maybe_unused spi_imx_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct spi_imx_data *spi_imx;

	spi_imx = spi_master_get_devdata(master);

	clk_disable_unprepare(spi_imx->clk_per);
	clk_disable_unprepare(spi_imx->clk_ipg);

	return 0;
}
static int __maybe_unused spi_imx_suspend(struct device *dev)
{
	pinctrl_pm_select_sleep_state(dev);
	return 0;
}

static int __maybe_unused spi_imx_resume(struct device *dev)
{
	pinctrl_pm_select_default_state(dev);
	return 0;
}
static const struct dev_pm_ops imx_spi_pm = {
	SET_RUNTIME_PM_OPS(spi_imx_runtime_suspend,
				spi_imx_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_imx_suspend, spi_imx_resume)
};
static struct platform_driver spi_imx_driver = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = spi_imx_dt_ids,
		   .pm = &imx_spi_pm,
	},
	.probe = spi_imx_probe,
	.remove = spi_imx_remove,
};
module_platform_driver(spi_imx_driver);
MODULE_DESCRIPTION("SPI Controller driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);