// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

#ifdef CONFIG_LANTIQ
#include <lantiq_soc.h>
#endif
/* IRQ line names as used in the device tree */
#define LTQ_SPI_RX_IRQ_NAME	"spi_rx"
#define LTQ_SPI_TX_IRQ_NAME	"spi_tx"
#define LTQ_SPI_ERR_IRQ_NAME	"spi_err"
#define LTQ_SPI_FRM_IRQ_NAME	"spi_frm"

/* Register offsets */
#define LTQ_SPI_CLC		0x00
#define LTQ_SPI_PISEL		0x04
#define LTQ_SPI_ID		0x08
#define LTQ_SPI_CON		0x10
#define LTQ_SPI_STAT		0x14
#define LTQ_SPI_WHBSTATE	0x18
#define LTQ_SPI_TB		0x20
#define LTQ_SPI_RB		0x24
#define LTQ_SPI_RXFCON		0x30
#define LTQ_SPI_TXFCON		0x34
#define LTQ_SPI_FSTAT		0x38
#define LTQ_SPI_BRT		0x40
#define LTQ_SPI_BRSTAT		0x44
#define LTQ_SPI_SFCON		0x60
#define LTQ_SPI_SFSTAT		0x64
#define LTQ_SPI_GPOCON		0x70
#define LTQ_SPI_GPOSTAT		0x74
#define LTQ_SPI_FPGO		0x78
#define LTQ_SPI_RXREQ		0x80
#define LTQ_SPI_RXCNT		0x84
#define LTQ_SPI_DMACON		0xec
#define LTQ_SPI_IRNEN		0xf4

#define LTQ_SPI_CLC_SMC_S	16	/* Clock divider for sleep mode */
#define LTQ_SPI_CLC_SMC_M	(0xFF << LTQ_SPI_CLC_SMC_S)
#define LTQ_SPI_CLC_RMC_S	8	/* Clock divider for normal run mode */
#define LTQ_SPI_CLC_RMC_M	(0xFF << LTQ_SPI_CLC_RMC_S)
#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */

#define LTQ_SPI_ID_TXFS_S	24	/* Implemented TX FIFO size */
#define LTQ_SPI_ID_RXFS_S	16	/* Implemented RX FIFO size */
#define LTQ_SPI_ID_MOD_S	8	/* Module ID */
#define LTQ_SPI_ID_MOD_M	(0xff << LTQ_SPI_ID_MOD_S)
#define LTQ_SPI_ID_CFG_S	5	/* DMA interface support */
#define LTQ_SPI_ID_CFG_M	(1 << LTQ_SPI_ID_CFG_S)
#define LTQ_SPI_ID_REV_M	0x1F	/* Hardware revision number */

#define LTQ_SPI_CON_BM_S	16	/* Data width selection */
#define LTQ_SPI_CON_BM_M	(0x1F << LTQ_SPI_CON_BM_S)
#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */

#define LTQ_SPI_STAT_RXBV_S	28
#define LTQ_SPI_STAT_RXBV_M	(0x7 << LTQ_SPI_STAT_RXBV_S)
#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
#define LTQ_SPI_STAT_ME		BIT(7)	/* Mode error flag */
#define LTQ_SPI_STAT_MS		BIT(1)	/* Host/target select bit */
#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */
#define LTQ_SPI_STAT_ERRORS	(LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
				 LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
				 LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)

#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set host select bit */
#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear host select bit */
#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
#define LTQ_SPI_WHBSTATE_CLR_ERRORS	(LTQ_SPI_WHBSTATE_CLRRUE | \
					 LTQ_SPI_WHBSTATE_CLRME | \
					 LTQ_SPI_WHBSTATE_CLRTE | \
					 LTQ_SPI_WHBSTATE_CLRRE | \
					 LTQ_SPI_WHBSTATE_CLRAE | \
					 LTQ_SPI_WHBSTATE_CLRTUE)

#define LTQ_SPI_RXFCON_RXFITL_S	8	/* FIFO interrupt trigger level */
#define LTQ_SPI_RXFCON_RXFLU	BIT(1)	/* FIFO flush */
#define LTQ_SPI_RXFCON_RXFEN	BIT(0)	/* FIFO enable */

#define LTQ_SPI_TXFCON_TXFITL_S	8	/* FIFO interrupt trigger level */
#define LTQ_SPI_TXFCON_TXFLU	BIT(1)	/* FIFO flush */
#define LTQ_SPI_TXFCON_TXFEN	BIT(0)	/* FIFO enable */

#define LTQ_SPI_FSTAT_RXFFL_S	0
#define LTQ_SPI_FSTAT_TXFFL_S	8

#define LTQ_SPI_GPOCON_ISCSBN_S	8
#define LTQ_SPI_GPOCON_INVOUTN_S	0

#define LTQ_SPI_FGPO_SETOUTN_S	8
#define LTQ_SPI_FGPO_CLROUTN_S	0

#define LTQ_SPI_RXREQ_RXCNT_M	0xFFFF	/* Receive count value */
#define LTQ_SPI_RXCNT_TODO_M	0xFFFF	/* Receive to-do value */

#define LTQ_SPI_IRNEN_TFI	BIT(4)	/* TX finished interrupt */
#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
#define LTQ_SPI_IRNEN_T_XWAY	BIT(1)	/* Transmit end interrupt request */
#define LTQ_SPI_IRNEN_R_XWAY	BIT(0)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_R_XRX	BIT(1)	/* Receive end interrupt request */
#define LTQ_SPI_IRNEN_T_XRX	BIT(0)	/* Transmit end interrupt request */
#define LTQ_SPI_IRNEN_ALL	0x1F
153 struct lantiq_ssc_spi
;
155 struct lantiq_ssc_hwcfg
{
156 int (*cfg_irq
)(struct platform_device
*pdev
, struct lantiq_ssc_spi
*spi
);
157 unsigned int irnen_r
;
158 unsigned int irnen_t
;
165 struct lantiq_ssc_spi
{
166 struct spi_controller
*host
;
168 void __iomem
*regbase
;
171 const struct lantiq_ssc_hwcfg
*hwcfg
;
174 struct workqueue_struct
*wq
;
175 struct work_struct work
;
179 unsigned int tx_todo
;
180 unsigned int rx_todo
;
181 unsigned int bits_per_word
;
182 unsigned int speed_hz
;
183 unsigned int tx_fifo_size
;
184 unsigned int rx_fifo_size
;
185 unsigned int base_cs
;
186 unsigned int fdx_tx_level
;
189 static u32
lantiq_ssc_readl(const struct lantiq_ssc_spi
*spi
, u32 reg
)
191 return __raw_readl(spi
->regbase
+ reg
);
194 static void lantiq_ssc_writel(const struct lantiq_ssc_spi
*spi
, u32 val
,
197 __raw_writel(val
, spi
->regbase
+ reg
);
200 static void lantiq_ssc_maskl(const struct lantiq_ssc_spi
*spi
, u32 clr
,
203 u32 val
= __raw_readl(spi
->regbase
+ reg
);
207 __raw_writel(val
, spi
->regbase
+ reg
);
210 static unsigned int tx_fifo_level(const struct lantiq_ssc_spi
*spi
)
212 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
213 u32 fstat
= lantiq_ssc_readl(spi
, LTQ_SPI_FSTAT
);
215 return (fstat
>> LTQ_SPI_FSTAT_TXFFL_S
) & hwcfg
->fifo_size_mask
;
218 static unsigned int rx_fifo_level(const struct lantiq_ssc_spi
*spi
)
220 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
221 u32 fstat
= lantiq_ssc_readl(spi
, LTQ_SPI_FSTAT
);
223 return (fstat
>> LTQ_SPI_FSTAT_RXFFL_S
) & hwcfg
->fifo_size_mask
;
226 static unsigned int tx_fifo_free(const struct lantiq_ssc_spi
*spi
)
228 return spi
->tx_fifo_size
- tx_fifo_level(spi
);
231 static void rx_fifo_reset(const struct lantiq_ssc_spi
*spi
)
233 u32 val
= spi
->rx_fifo_size
<< LTQ_SPI_RXFCON_RXFITL_S
;
235 val
|= LTQ_SPI_RXFCON_RXFEN
| LTQ_SPI_RXFCON_RXFLU
;
236 lantiq_ssc_writel(spi
, val
, LTQ_SPI_RXFCON
);
239 static void tx_fifo_reset(const struct lantiq_ssc_spi
*spi
)
241 u32 val
= 1 << LTQ_SPI_TXFCON_TXFITL_S
;
243 val
|= LTQ_SPI_TXFCON_TXFEN
| LTQ_SPI_TXFCON_TXFLU
;
244 lantiq_ssc_writel(spi
, val
, LTQ_SPI_TXFCON
);
247 static void rx_fifo_flush(const struct lantiq_ssc_spi
*spi
)
249 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_RXFCON_RXFLU
, LTQ_SPI_RXFCON
);
252 static void tx_fifo_flush(const struct lantiq_ssc_spi
*spi
)
254 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_TXFCON_TXFLU
, LTQ_SPI_TXFCON
);
257 static void hw_enter_config_mode(const struct lantiq_ssc_spi
*spi
)
259 lantiq_ssc_writel(spi
, LTQ_SPI_WHBSTATE_CLREN
, LTQ_SPI_WHBSTATE
);
262 static void hw_enter_active_mode(const struct lantiq_ssc_spi
*spi
)
264 lantiq_ssc_writel(spi
, LTQ_SPI_WHBSTATE_SETEN
, LTQ_SPI_WHBSTATE
);
267 static void hw_setup_speed_hz(const struct lantiq_ssc_spi
*spi
,
268 unsigned int max_speed_hz
)
273 * SPI module clock is derived from FPI bus clock dependent on
274 * divider value in CLC.RMS which is always set to 1.
277 * baudrate = --------------
280 spi_clk
= clk_get_rate(spi
->fpi_clk
) / 2;
282 if (max_speed_hz
> spi_clk
)
285 brt
= spi_clk
/ max_speed_hz
- 1;
290 dev_dbg(spi
->dev
, "spi_clk %u, max_speed_hz %u, brt %u\n",
291 spi_clk
, max_speed_hz
, brt
);
293 lantiq_ssc_writel(spi
, brt
, LTQ_SPI_BRT
);
296 static void hw_setup_bits_per_word(const struct lantiq_ssc_spi
*spi
,
297 unsigned int bits_per_word
)
301 /* CON.BM value = bits_per_word - 1 */
302 bm
= (bits_per_word
- 1) << LTQ_SPI_CON_BM_S
;
304 lantiq_ssc_maskl(spi
, LTQ_SPI_CON_BM_M
, bm
, LTQ_SPI_CON
);
307 static void hw_setup_clock_mode(const struct lantiq_ssc_spi
*spi
,
310 u32 con_set
= 0, con_clr
= 0;
313 * SPI mode mapping in CON register:
314 * Mode CPOL CPHA CON.PO CON.PH
321 con_clr
|= LTQ_SPI_CON_PH
;
323 con_set
|= LTQ_SPI_CON_PH
;
326 con_set
|= LTQ_SPI_CON_PO
| LTQ_SPI_CON_IDLE
;
328 con_clr
|= LTQ_SPI_CON_PO
| LTQ_SPI_CON_IDLE
;
330 /* Set heading control */
331 if (mode
& SPI_LSB_FIRST
)
332 con_clr
|= LTQ_SPI_CON_HB
;
334 con_set
|= LTQ_SPI_CON_HB
;
336 /* Set loopback mode */
338 con_set
|= LTQ_SPI_CON_LB
;
340 con_clr
|= LTQ_SPI_CON_LB
;
342 lantiq_ssc_maskl(spi
, con_clr
, con_set
, LTQ_SPI_CON
);
345 static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi
*spi
)
347 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
350 * Set clock divider for run mode to 1 to
351 * run at same frequency as FPI bus
353 lantiq_ssc_writel(spi
, 1 << LTQ_SPI_CLC_RMC_S
, LTQ_SPI_CLC
);
355 /* Put controller into config mode */
356 hw_enter_config_mode(spi
);
358 /* Clear error flags */
359 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS
, LTQ_SPI_WHBSTATE
);
361 /* Enable error checking, disable TX/RX */
362 lantiq_ssc_writel(spi
, LTQ_SPI_CON_RUEN
| LTQ_SPI_CON_AEN
|
363 LTQ_SPI_CON_TEN
| LTQ_SPI_CON_REN
| LTQ_SPI_CON_TXOFF
|
364 LTQ_SPI_CON_RXOFF
, LTQ_SPI_CON
);
366 /* Setup default SPI mode */
367 hw_setup_bits_per_word(spi
, spi
->bits_per_word
);
368 hw_setup_clock_mode(spi
, SPI_MODE_0
);
370 /* Enable host mode and clear error flags */
371 lantiq_ssc_writel(spi
, LTQ_SPI_WHBSTATE_SETMS
|
372 LTQ_SPI_WHBSTATE_CLR_ERRORS
,
375 /* Reset GPIO/CS registers */
376 lantiq_ssc_writel(spi
, 0, LTQ_SPI_GPOCON
);
377 lantiq_ssc_writel(spi
, 0xFF00, LTQ_SPI_FPGO
);
379 /* Enable and flush FIFOs */
383 /* Enable interrupts */
384 lantiq_ssc_writel(spi
, hwcfg
->irnen_t
| hwcfg
->irnen_r
|
385 LTQ_SPI_IRNEN_E
, LTQ_SPI_IRNEN
);
388 static int lantiq_ssc_setup(struct spi_device
*spidev
)
390 struct spi_controller
*host
= spidev
->controller
;
391 struct lantiq_ssc_spi
*spi
= spi_controller_get_devdata(host
);
392 unsigned int cs
= spi_get_chipselect(spidev
, 0);
395 /* GPIOs are used for CS */
396 if (spi_get_csgpiod(spidev
, 0))
399 dev_dbg(spi
->dev
, "using internal chipselect %u\n", cs
);
401 if (cs
< spi
->base_cs
) {
403 "chipselect %i too small (min %i)\n", cs
, spi
->base_cs
);
407 /* set GPO pin to CS mode */
408 gpocon
= 1 << ((cs
- spi
->base_cs
) + LTQ_SPI_GPOCON_ISCSBN_S
);
411 if (spidev
->mode
& SPI_CS_HIGH
)
412 gpocon
|= 1 << (cs
- spi
->base_cs
);
414 lantiq_ssc_maskl(spi
, 0, gpocon
, LTQ_SPI_GPOCON
);
419 static int lantiq_ssc_prepare_message(struct spi_controller
*host
,
420 struct spi_message
*message
)
422 struct lantiq_ssc_spi
*spi
= spi_controller_get_devdata(host
);
424 hw_enter_config_mode(spi
);
425 hw_setup_clock_mode(spi
, message
->spi
->mode
);
426 hw_enter_active_mode(spi
);
431 static void hw_setup_transfer(struct lantiq_ssc_spi
*spi
,
432 struct spi_device
*spidev
, struct spi_transfer
*t
)
434 unsigned int speed_hz
= t
->speed_hz
;
435 unsigned int bits_per_word
= t
->bits_per_word
;
438 if (bits_per_word
!= spi
->bits_per_word
||
439 speed_hz
!= spi
->speed_hz
) {
440 hw_enter_config_mode(spi
);
441 hw_setup_speed_hz(spi
, speed_hz
);
442 hw_setup_bits_per_word(spi
, bits_per_word
);
443 hw_enter_active_mode(spi
);
445 spi
->speed_hz
= speed_hz
;
446 spi
->bits_per_word
= bits_per_word
;
449 /* Configure transmitter and receiver */
450 con
= lantiq_ssc_readl(spi
, LTQ_SPI_CON
);
452 con
&= ~LTQ_SPI_CON_TXOFF
;
454 con
|= LTQ_SPI_CON_TXOFF
;
457 con
&= ~LTQ_SPI_CON_RXOFF
;
459 con
|= LTQ_SPI_CON_RXOFF
;
461 lantiq_ssc_writel(spi
, con
, LTQ_SPI_CON
);
464 static int lantiq_ssc_unprepare_message(struct spi_controller
*host
,
465 struct spi_message
*message
)
467 struct lantiq_ssc_spi
*spi
= spi_controller_get_devdata(host
);
469 flush_workqueue(spi
->wq
);
471 /* Disable transmitter and receiver while idle */
472 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_CON_TXOFF
| LTQ_SPI_CON_RXOFF
,
478 static void tx_fifo_write(struct lantiq_ssc_spi
*spi
)
484 unsigned int tx_free
= tx_fifo_free(spi
);
486 spi
->fdx_tx_level
= 0;
487 while (spi
->tx_todo
&& tx_free
) {
488 switch (spi
->bits_per_word
) {
496 tx16
= (u16
*) spi
->tx
;
502 tx32
= (u32
*) spi
->tx
;
513 lantiq_ssc_writel(spi
, data
, LTQ_SPI_TB
);
519 static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi
*spi
)
525 unsigned int rx_fill
= rx_fifo_level(spi
);
528 * Wait until all expected data to be shifted in.
529 * Otherwise, rx overrun may occur.
531 while (rx_fill
!= spi
->fdx_tx_level
)
532 rx_fill
= rx_fifo_level(spi
);
535 data
= lantiq_ssc_readl(spi
, LTQ_SPI_RB
);
537 switch (spi
->bits_per_word
) {
545 rx16
= (u16
*) spi
->rx
;
551 rx32
= (u32
*) spi
->rx
;
565 static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi
*spi
)
569 unsigned int rxbv
, shift
;
570 unsigned int rx_fill
= rx_fifo_level(spi
);
573 * In RX-only mode the bits per word value is ignored by HW. A value
574 * of 32 is used instead. Thus all 4 bytes per FIFO must be read.
575 * If remaining RX bytes are less than 4, the FIFO must be read
576 * differently. The amount of received and valid bytes is indicated
577 * by STAT.RXBV register value.
580 if (spi
->rx_todo
< 4) {
581 rxbv
= (lantiq_ssc_readl(spi
, LTQ_SPI_STAT
) &
582 LTQ_SPI_STAT_RXBV_M
) >> LTQ_SPI_STAT_RXBV_S
;
583 data
= lantiq_ssc_readl(spi
, LTQ_SPI_RB
);
585 shift
= (rxbv
- 1) * 8;
589 *rx8
++ = (data
>> shift
) & 0xFF;
596 data
= lantiq_ssc_readl(spi
, LTQ_SPI_RB
);
597 rx32
= (u32
*) spi
->rx
;
607 static void rx_request(struct lantiq_ssc_spi
*spi
)
609 unsigned int rxreq
, rxreq_max
;
612 * To avoid receive overflows at high clocks it is better to request
613 * only the amount of bytes that fits into all FIFOs. This value
614 * depends on the FIFO size implemented in hardware.
616 rxreq
= spi
->rx_todo
;
617 rxreq_max
= spi
->rx_fifo_size
* 4;
618 if (rxreq
> rxreq_max
)
621 lantiq_ssc_writel(spi
, rxreq
, LTQ_SPI_RXREQ
);
624 static irqreturn_t
lantiq_ssc_xmit_interrupt(int irq
, void *data
)
626 struct lantiq_ssc_spi
*spi
= data
;
627 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
628 u32 val
= lantiq_ssc_readl(spi
, hwcfg
->irncr
);
630 spin_lock(&spi
->lock
);
632 lantiq_ssc_writel(spi
, val
, hwcfg
->irncr
);
635 if (spi
->rx
&& spi
->rx_todo
)
636 rx_fifo_read_full_duplex(spi
);
640 else if (!tx_fifo_level(spi
))
642 } else if (spi
->rx
) {
644 rx_fifo_read_half_duplex(spi
);
655 spin_unlock(&spi
->lock
);
659 queue_work(spi
->wq
, &spi
->work
);
660 spin_unlock(&spi
->lock
);
665 static irqreturn_t
lantiq_ssc_err_interrupt(int irq
, void *data
)
667 struct lantiq_ssc_spi
*spi
= data
;
668 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
669 u32 stat
= lantiq_ssc_readl(spi
, LTQ_SPI_STAT
);
670 u32 val
= lantiq_ssc_readl(spi
, hwcfg
->irncr
);
672 if (!(stat
& LTQ_SPI_STAT_ERRORS
))
675 spin_lock(&spi
->lock
);
677 lantiq_ssc_writel(spi
, val
, hwcfg
->irncr
);
679 if (stat
& LTQ_SPI_STAT_RUE
)
680 dev_err(spi
->dev
, "receive underflow error\n");
681 if (stat
& LTQ_SPI_STAT_TUE
)
682 dev_err(spi
->dev
, "transmit underflow error\n");
683 if (stat
& LTQ_SPI_STAT_AE
)
684 dev_err(spi
->dev
, "abort error\n");
685 if (stat
& LTQ_SPI_STAT_RE
)
686 dev_err(spi
->dev
, "receive overflow error\n");
687 if (stat
& LTQ_SPI_STAT_TE
)
688 dev_err(spi
->dev
, "transmit overflow error\n");
689 if (stat
& LTQ_SPI_STAT_ME
)
690 dev_err(spi
->dev
, "mode error\n");
692 /* Clear error flags */
693 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS
, LTQ_SPI_WHBSTATE
);
695 /* set bad status so it can be retried */
696 if (spi
->host
->cur_msg
)
697 spi
->host
->cur_msg
->status
= -EIO
;
698 queue_work(spi
->wq
, &spi
->work
);
699 spin_unlock(&spi
->lock
);
704 static irqreturn_t
intel_lgm_ssc_isr(int irq
, void *data
)
706 struct lantiq_ssc_spi
*spi
= data
;
707 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
708 u32 val
= lantiq_ssc_readl(spi
, hwcfg
->irncr
);
710 if (!(val
& LTQ_SPI_IRNEN_ALL
))
713 if (val
& LTQ_SPI_IRNEN_E
)
714 return lantiq_ssc_err_interrupt(irq
, data
);
716 if ((val
& hwcfg
->irnen_t
) || (val
& hwcfg
->irnen_r
))
717 return lantiq_ssc_xmit_interrupt(irq
, data
);
722 static int transfer_start(struct lantiq_ssc_spi
*spi
, struct spi_device
*spidev
,
723 struct spi_transfer
*t
)
727 spin_lock_irqsave(&spi
->lock
, flags
);
733 spi
->tx_todo
= t
->len
;
735 /* initially fill TX FIFO */
740 spi
->rx_todo
= t
->len
;
742 /* start shift clock in RX-only mode */
747 spin_unlock_irqrestore(&spi
->lock
, flags
);
753 * The driver only gets an interrupt when the FIFO is empty, but there
754 * is an additional shift register from which the data is written to
755 * the wire. We get the last interrupt when the controller starts to
756 * write the last word to the wire, not when it is finished. Do busy
757 * waiting till it finishes.
759 static void lantiq_ssc_bussy_work(struct work_struct
*work
)
761 struct lantiq_ssc_spi
*spi
;
762 unsigned long long timeout
= 8LL * 1000LL;
765 spi
= container_of(work
, typeof(*spi
), work
);
767 do_div(timeout
, spi
->speed_hz
);
768 timeout
+= timeout
+ 100; /* some tolerance */
770 end
= jiffies
+ msecs_to_jiffies(timeout
);
772 u32 stat
= lantiq_ssc_readl(spi
, LTQ_SPI_STAT
);
774 if (!(stat
& LTQ_SPI_STAT_BSY
)) {
775 spi_finalize_current_transfer(spi
->host
);
780 } while (!time_after_eq(jiffies
, end
));
782 if (spi
->host
->cur_msg
)
783 spi
->host
->cur_msg
->status
= -EIO
;
784 spi_finalize_current_transfer(spi
->host
);
/* Error/timeout callback from the SPI core: drop any stale FIFO data. */
static void lantiq_ssc_handle_err(struct spi_controller *host,
				  struct spi_message *message)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	/* flush FIFOs on timeout */
	rx_fifo_flush(spi);
	tx_fifo_flush(spi);
}
797 static void lantiq_ssc_set_cs(struct spi_device
*spidev
, bool enable
)
799 struct lantiq_ssc_spi
*spi
= spi_controller_get_devdata(spidev
->controller
);
800 unsigned int cs
= spi_get_chipselect(spidev
, 0);
803 if (!!(spidev
->mode
& SPI_CS_HIGH
) == enable
)
804 fgpo
= (1 << (cs
- spi
->base_cs
));
806 fgpo
= (1 << (cs
- spi
->base_cs
+ LTQ_SPI_FGPO_SETOUTN_S
));
808 lantiq_ssc_writel(spi
, fgpo
, LTQ_SPI_FPGO
);
/* spi_controller->transfer_one: program the transfer and start it. */
static int lantiq_ssc_transfer_one(struct spi_controller *host,
				   struct spi_device *spidev,
				   struct spi_transfer *t)
{
	struct lantiq_ssc_spi *spi = spi_controller_get_devdata(host);

	hw_setup_transfer(spi, spidev, t);

	return transfer_start(spi, spidev, t);
}
822 static int intel_lgm_cfg_irq(struct platform_device
*pdev
, struct lantiq_ssc_spi
*spi
)
826 irq
= platform_get_irq(pdev
, 0);
830 return devm_request_irq(&pdev
->dev
, irq
, intel_lgm_ssc_isr
, 0, "spi", spi
);
833 static int lantiq_cfg_irq(struct platform_device
*pdev
, struct lantiq_ssc_spi
*spi
)
837 irq
= platform_get_irq_byname(pdev
, LTQ_SPI_RX_IRQ_NAME
);
841 err
= devm_request_irq(&pdev
->dev
, irq
, lantiq_ssc_xmit_interrupt
,
842 0, LTQ_SPI_RX_IRQ_NAME
, spi
);
846 irq
= platform_get_irq_byname(pdev
, LTQ_SPI_TX_IRQ_NAME
);
850 err
= devm_request_irq(&pdev
->dev
, irq
, lantiq_ssc_xmit_interrupt
,
851 0, LTQ_SPI_TX_IRQ_NAME
, spi
);
856 irq
= platform_get_irq_byname(pdev
, LTQ_SPI_ERR_IRQ_NAME
);
860 err
= devm_request_irq(&pdev
->dev
, irq
, lantiq_ssc_err_interrupt
,
861 0, LTQ_SPI_ERR_IRQ_NAME
, spi
);
865 static const struct lantiq_ssc_hwcfg lantiq_ssc_xway
= {
866 .cfg_irq
= lantiq_cfg_irq
,
867 .irnen_r
= LTQ_SPI_IRNEN_R_XWAY
,
868 .irnen_t
= LTQ_SPI_IRNEN_T_XWAY
,
871 .fifo_size_mask
= GENMASK(5, 0),
875 static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx
= {
876 .cfg_irq
= lantiq_cfg_irq
,
877 .irnen_r
= LTQ_SPI_IRNEN_R_XRX
,
878 .irnen_t
= LTQ_SPI_IRNEN_T_XRX
,
881 .fifo_size_mask
= GENMASK(5, 0),
885 static const struct lantiq_ssc_hwcfg intel_ssc_lgm
= {
886 .cfg_irq
= intel_lgm_cfg_irq
,
887 .irnen_r
= LTQ_SPI_IRNEN_R_XRX
,
888 .irnen_t
= LTQ_SPI_IRNEN_T_XRX
,
891 .fifo_size_mask
= GENMASK(7, 0),
895 static const struct of_device_id lantiq_ssc_match
[] = {
896 { .compatible
= "lantiq,ase-spi", .data
= &lantiq_ssc_xway
, },
897 { .compatible
= "lantiq,falcon-spi", .data
= &lantiq_ssc_xrx
, },
898 { .compatible
= "lantiq,xrx100-spi", .data
= &lantiq_ssc_xrx
, },
899 { .compatible
= "intel,lgm-spi", .data
= &intel_ssc_lgm
, },
902 MODULE_DEVICE_TABLE(of
, lantiq_ssc_match
);
904 static int lantiq_ssc_probe(struct platform_device
*pdev
)
906 struct device
*dev
= &pdev
->dev
;
907 struct spi_controller
*host
;
908 struct lantiq_ssc_spi
*spi
;
909 const struct lantiq_ssc_hwcfg
*hwcfg
;
910 u32 id
, supports_dma
, revision
;
914 hwcfg
= of_device_get_match_data(dev
);
916 host
= spi_alloc_host(dev
, sizeof(struct lantiq_ssc_spi
));
920 spi
= spi_controller_get_devdata(host
);
924 platform_set_drvdata(pdev
, spi
);
925 spi
->regbase
= devm_platform_ioremap_resource(pdev
, 0);
926 if (IS_ERR(spi
->regbase
)) {
927 err
= PTR_ERR(spi
->regbase
);
931 err
= hwcfg
->cfg_irq(pdev
, spi
);
935 spi
->spi_clk
= devm_clk_get_enabled(dev
, "gate");
936 if (IS_ERR(spi
->spi_clk
)) {
937 err
= PTR_ERR(spi
->spi_clk
);
942 * Use the old clk_get_fpi() function on Lantiq platform, till it
943 * supports common clk.
945 #if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
946 spi
->fpi_clk
= clk_get_fpi();
948 spi
->fpi_clk
= clk_get(dev
, "freq");
950 if (IS_ERR(spi
->fpi_clk
)) {
951 err
= PTR_ERR(spi
->fpi_clk
);
956 of_property_read_u32(pdev
->dev
.of_node
, "num-cs", &num_cs
);
959 of_property_read_u32(pdev
->dev
.of_node
, "base-cs", &spi
->base_cs
);
961 spin_lock_init(&spi
->lock
);
962 spi
->bits_per_word
= 8;
965 host
->dev
.of_node
= pdev
->dev
.of_node
;
966 host
->num_chipselect
= num_cs
;
967 host
->use_gpio_descriptors
= true;
968 host
->setup
= lantiq_ssc_setup
;
969 host
->set_cs
= lantiq_ssc_set_cs
;
970 host
->handle_err
= lantiq_ssc_handle_err
;
971 host
->prepare_message
= lantiq_ssc_prepare_message
;
972 host
->unprepare_message
= lantiq_ssc_unprepare_message
;
973 host
->transfer_one
= lantiq_ssc_transfer_one
;
974 host
->mode_bits
= SPI_CPOL
| SPI_CPHA
| SPI_LSB_FIRST
| SPI_CS_HIGH
|
976 host
->bits_per_word_mask
= SPI_BPW_RANGE_MASK(2, 8) |
977 SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
979 spi
->wq
= alloc_ordered_workqueue(dev_name(dev
), WQ_MEM_RECLAIM
);
984 INIT_WORK(&spi
->work
, lantiq_ssc_bussy_work
);
986 id
= lantiq_ssc_readl(spi
, LTQ_SPI_ID
);
987 spi
->tx_fifo_size
= (id
>> LTQ_SPI_ID_TXFS_S
) & hwcfg
->fifo_size_mask
;
988 spi
->rx_fifo_size
= (id
>> LTQ_SPI_ID_RXFS_S
) & hwcfg
->fifo_size_mask
;
989 supports_dma
= (id
& LTQ_SPI_ID_CFG_M
) >> LTQ_SPI_ID_CFG_S
;
990 revision
= id
& LTQ_SPI_ID_REV_M
;
992 lantiq_ssc_hw_init(spi
);
995 "Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
996 revision
, spi
->tx_fifo_size
, spi
->rx_fifo_size
, supports_dma
);
998 err
= devm_spi_register_controller(dev
, host
);
1000 dev_err(dev
, "failed to register spi host\n");
1001 goto err_wq_destroy
;
1007 destroy_workqueue(spi
->wq
);
1009 clk_put(spi
->fpi_clk
);
1011 spi_controller_put(host
);
1016 static void lantiq_ssc_remove(struct platform_device
*pdev
)
1018 struct lantiq_ssc_spi
*spi
= platform_get_drvdata(pdev
);
1020 lantiq_ssc_writel(spi
, 0, LTQ_SPI_IRNEN
);
1021 lantiq_ssc_writel(spi
, 0, LTQ_SPI_CLC
);
1024 hw_enter_config_mode(spi
);
1026 destroy_workqueue(spi
->wq
);
1027 clk_put(spi
->fpi_clk
);
1030 static struct platform_driver lantiq_ssc_driver
= {
1031 .probe
= lantiq_ssc_probe
,
1032 .remove
= lantiq_ssc_remove
,
1034 .name
= "spi-lantiq-ssc",
1035 .of_match_table
= lantiq_ssc_match
,
1038 module_platform_driver(lantiq_ssc_driver
);
1040 MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
1041 MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
1042 MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
1043 MODULE_LICENSE("GPL");
1044 MODULE_ALIAS("platform:spi-lantiq-ssc");