1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
4 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/of_device.h>
10 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/interrupt.h>
14 #include <linux/sched.h>
15 #include <linux/completion.h>
16 #include <linux/spinlock.h>
17 #include <linux/err.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/spi/spi.h>
22 #include <lantiq_soc.h>
25 #define LTQ_SPI_RX_IRQ_NAME "spi_rx"
26 #define LTQ_SPI_TX_IRQ_NAME "spi_tx"
27 #define LTQ_SPI_ERR_IRQ_NAME "spi_err"
28 #define LTQ_SPI_FRM_IRQ_NAME "spi_frm"
30 #define LTQ_SPI_CLC 0x00
31 #define LTQ_SPI_PISEL 0x04
32 #define LTQ_SPI_ID 0x08
33 #define LTQ_SPI_CON 0x10
34 #define LTQ_SPI_STAT 0x14
35 #define LTQ_SPI_WHBSTATE 0x18
36 #define LTQ_SPI_TB 0x20
37 #define LTQ_SPI_RB 0x24
38 #define LTQ_SPI_RXFCON 0x30
39 #define LTQ_SPI_TXFCON 0x34
40 #define LTQ_SPI_FSTAT 0x38
41 #define LTQ_SPI_BRT 0x40
42 #define LTQ_SPI_BRSTAT 0x44
43 #define LTQ_SPI_SFCON 0x60
44 #define LTQ_SPI_SFSTAT 0x64
45 #define LTQ_SPI_GPOCON 0x70
46 #define LTQ_SPI_GPOSTAT 0x74
47 #define LTQ_SPI_FPGO 0x78
48 #define LTQ_SPI_RXREQ 0x80
49 #define LTQ_SPI_RXCNT 0x84
50 #define LTQ_SPI_DMACON 0xec
51 #define LTQ_SPI_IRNEN 0xf4
53 #define LTQ_SPI_CLC_SMC_S 16 /* Clock divider for sleep mode */
54 #define LTQ_SPI_CLC_SMC_M (0xFF << LTQ_SPI_CLC_SMC_S)
55 #define LTQ_SPI_CLC_RMC_S 8 /* Clock divider for normal run mode */
56 #define LTQ_SPI_CLC_RMC_M (0xFF << LTQ_SPI_CLC_RMC_S)
57 #define LTQ_SPI_CLC_DISS BIT(1) /* Disable status bit */
58 #define LTQ_SPI_CLC_DISR BIT(0) /* Disable request bit */
60 #define LTQ_SPI_ID_TXFS_S 24 /* Implemented TX FIFO size */
61 #define LTQ_SPI_ID_RXFS_S 16 /* Implemented RX FIFO size */
62 #define LTQ_SPI_ID_MOD_S 8 /* Module ID */
63 #define LTQ_SPI_ID_MOD_M (0xff << LTQ_SPI_ID_MOD_S)
64 #define LTQ_SPI_ID_CFG_S 5 /* DMA interface support */
65 #define LTQ_SPI_ID_CFG_M (1 << LTQ_SPI_ID_CFG_S)
66 #define LTQ_SPI_ID_REV_M 0x1F /* Hardware revision number */
68 #define LTQ_SPI_CON_BM_S 16 /* Data width selection */
69 #define LTQ_SPI_CON_BM_M (0x1F << LTQ_SPI_CON_BM_S)
70 #define LTQ_SPI_CON_EM BIT(24) /* Echo mode */
71 #define LTQ_SPI_CON_IDLE BIT(23) /* Idle bit value */
72 #define LTQ_SPI_CON_ENBV BIT(22) /* Enable byte valid control */
73 #define LTQ_SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
74 #define LTQ_SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
75 #define LTQ_SPI_CON_AEN BIT(10) /* Abort error enable */
76 #define LTQ_SPI_CON_REN BIT(9) /* Receive overflow error enable */
77 #define LTQ_SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
78 #define LTQ_SPI_CON_LB BIT(7) /* Loopback control */
79 #define LTQ_SPI_CON_PO BIT(6) /* Clock polarity control */
80 #define LTQ_SPI_CON_PH BIT(5) /* Clock phase control */
81 #define LTQ_SPI_CON_HB BIT(4) /* Heading control */
82 #define LTQ_SPI_CON_RXOFF BIT(1) /* Switch receiver off */
83 #define LTQ_SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
85 #define LTQ_SPI_STAT_RXBV_S 28
86 #define LTQ_SPI_STAT_RXBV_M (0x7 << LTQ_SPI_STAT_RXBV_S)
87 #define LTQ_SPI_STAT_BSY BIT(13) /* Busy flag */
88 #define LTQ_SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
89 #define LTQ_SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
90 #define LTQ_SPI_STAT_AE BIT(10) /* Abort error flag */
91 #define LTQ_SPI_STAT_RE BIT(9) /* Receive error flag */
92 #define LTQ_SPI_STAT_TE BIT(8) /* Transmit error flag */
93 #define LTQ_SPI_STAT_ME BIT(7) /* Mode error flag */
94 #define LTQ_SPI_STAT_MS BIT(1) /* Master/slave select bit */
95 #define LTQ_SPI_STAT_EN BIT(0) /* Enable bit */
96 #define LTQ_SPI_STAT_ERRORS (LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
97 LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
98 LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)
100 #define LTQ_SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
101 #define LTQ_SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
102 #define LTQ_SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
103 #define LTQ_SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
104 #define LTQ_SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error flag */
105 #define LTQ_SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
106 #define LTQ_SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
107 #define LTQ_SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
108 #define LTQ_SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
109 #define LTQ_SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
110 #define LTQ_SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
111 #define LTQ_SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
112 #define LTQ_SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
113 #define LTQ_SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
114 #define LTQ_SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
115 #define LTQ_SPI_WHBSTATE_CLREN BIT(0) /* Clear enable bit (config mode) */
116 #define LTQ_SPI_WHBSTATE_CLR_ERRORS (LTQ_SPI_WHBSTATE_CLRRUE | \
117 LTQ_SPI_WHBSTATE_CLRME | \
118 LTQ_SPI_WHBSTATE_CLRTE | \
119 LTQ_SPI_WHBSTATE_CLRRE | \
120 LTQ_SPI_WHBSTATE_CLRAE | \
121 LTQ_SPI_WHBSTATE_CLRTUE)
123 #define LTQ_SPI_RXFCON_RXFITL_S 8 /* FIFO interrupt trigger level */
124 #define LTQ_SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
125 #define LTQ_SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
127 #define LTQ_SPI_TXFCON_TXFITL_S 8 /* FIFO interrupt trigger level */
128 #define LTQ_SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
129 #define LTQ_SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
131 #define LTQ_SPI_FSTAT_RXFFL_S 0
132 #define LTQ_SPI_FSTAT_TXFFL_S 8
134 #define LTQ_SPI_GPOCON_ISCSBN_S 8
135 #define LTQ_SPI_GPOCON_INVOUTN_S 0
137 #define LTQ_SPI_FGPO_SETOUTN_S 8
138 #define LTQ_SPI_FGPO_CLROUTN_S 0
140 #define LTQ_SPI_RXREQ_RXCNT_M 0xFFFF /* Receive count value */
141 #define LTQ_SPI_RXCNT_TODO_M 0xFFFF /* Receive to-do value */
143 #define LTQ_SPI_IRNEN_TFI BIT(4) /* TX finished interrupt */
144 #define LTQ_SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
145 #define LTQ_SPI_IRNEN_E BIT(2) /* Error end interrupt request */
146 #define LTQ_SPI_IRNEN_T_XWAY BIT(1) /* Transmit end interrupt request */
147 #define LTQ_SPI_IRNEN_R_XWAY BIT(0) /* Receive end interrupt request */
148 #define LTQ_SPI_IRNEN_R_XRX BIT(1) /* Receive end interrupt request */
149 #define LTQ_SPI_IRNEN_T_XRX BIT(0) /* Transmit end interrupt request */
150 #define LTQ_SPI_IRNEN_ALL 0x1F
152 struct lantiq_ssc_spi
;
154 struct lantiq_ssc_hwcfg
{
155 int (*cfg_irq
)(struct platform_device
*pdev
, struct lantiq_ssc_spi
*spi
);
156 unsigned int irnen_r
;
157 unsigned int irnen_t
;
164 struct lantiq_ssc_spi
{
165 struct spi_master
*master
;
167 void __iomem
*regbase
;
170 const struct lantiq_ssc_hwcfg
*hwcfg
;
173 struct workqueue_struct
*wq
;
174 struct work_struct work
;
178 unsigned int tx_todo
;
179 unsigned int rx_todo
;
180 unsigned int bits_per_word
;
181 unsigned int speed_hz
;
182 unsigned int tx_fifo_size
;
183 unsigned int rx_fifo_size
;
184 unsigned int base_cs
;
185 unsigned int fdx_tx_level
;
188 static u32
lantiq_ssc_readl(const struct lantiq_ssc_spi
*spi
, u32 reg
)
190 return __raw_readl(spi
->regbase
+ reg
);
193 static void lantiq_ssc_writel(const struct lantiq_ssc_spi
*spi
, u32 val
,
196 __raw_writel(val
, spi
->regbase
+ reg
);
199 static void lantiq_ssc_maskl(const struct lantiq_ssc_spi
*spi
, u32 clr
,
202 u32 val
= __raw_readl(spi
->regbase
+ reg
);
206 __raw_writel(val
, spi
->regbase
+ reg
);
209 static unsigned int tx_fifo_level(const struct lantiq_ssc_spi
*spi
)
211 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
212 u32 fstat
= lantiq_ssc_readl(spi
, LTQ_SPI_FSTAT
);
214 return (fstat
>> LTQ_SPI_FSTAT_TXFFL_S
) & hwcfg
->fifo_size_mask
;
217 static unsigned int rx_fifo_level(const struct lantiq_ssc_spi
*spi
)
219 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
220 u32 fstat
= lantiq_ssc_readl(spi
, LTQ_SPI_FSTAT
);
222 return (fstat
>> LTQ_SPI_FSTAT_RXFFL_S
) & hwcfg
->fifo_size_mask
;
225 static unsigned int tx_fifo_free(const struct lantiq_ssc_spi
*spi
)
227 return spi
->tx_fifo_size
- tx_fifo_level(spi
);
230 static void rx_fifo_reset(const struct lantiq_ssc_spi
*spi
)
232 u32 val
= spi
->rx_fifo_size
<< LTQ_SPI_RXFCON_RXFITL_S
;
234 val
|= LTQ_SPI_RXFCON_RXFEN
| LTQ_SPI_RXFCON_RXFLU
;
235 lantiq_ssc_writel(spi
, val
, LTQ_SPI_RXFCON
);
238 static void tx_fifo_reset(const struct lantiq_ssc_spi
*spi
)
240 u32 val
= 1 << LTQ_SPI_TXFCON_TXFITL_S
;
242 val
|= LTQ_SPI_TXFCON_TXFEN
| LTQ_SPI_TXFCON_TXFLU
;
243 lantiq_ssc_writel(spi
, val
, LTQ_SPI_TXFCON
);
246 static void rx_fifo_flush(const struct lantiq_ssc_spi
*spi
)
248 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_RXFCON_RXFLU
, LTQ_SPI_RXFCON
);
251 static void tx_fifo_flush(const struct lantiq_ssc_spi
*spi
)
253 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_TXFCON_TXFLU
, LTQ_SPI_TXFCON
);
256 static void hw_enter_config_mode(const struct lantiq_ssc_spi
*spi
)
258 lantiq_ssc_writel(spi
, LTQ_SPI_WHBSTATE_CLREN
, LTQ_SPI_WHBSTATE
);
261 static void hw_enter_active_mode(const struct lantiq_ssc_spi
*spi
)
263 lantiq_ssc_writel(spi
, LTQ_SPI_WHBSTATE_SETEN
, LTQ_SPI_WHBSTATE
);
266 static void hw_setup_speed_hz(const struct lantiq_ssc_spi
*spi
,
267 unsigned int max_speed_hz
)
272 * SPI module clock is derived from FPI bus clock dependent on
273 * divider value in CLC.RMS which is always set to 1.
276 * baudrate = --------------
279 spi_clk
= clk_get_rate(spi
->fpi_clk
) / 2;
281 if (max_speed_hz
> spi_clk
)
284 brt
= spi_clk
/ max_speed_hz
- 1;
289 dev_dbg(spi
->dev
, "spi_clk %u, max_speed_hz %u, brt %u\n",
290 spi_clk
, max_speed_hz
, brt
);
292 lantiq_ssc_writel(spi
, brt
, LTQ_SPI_BRT
);
295 static void hw_setup_bits_per_word(const struct lantiq_ssc_spi
*spi
,
296 unsigned int bits_per_word
)
300 /* CON.BM value = bits_per_word - 1 */
301 bm
= (bits_per_word
- 1) << LTQ_SPI_CON_BM_S
;
303 lantiq_ssc_maskl(spi
, LTQ_SPI_CON_BM_M
, bm
, LTQ_SPI_CON
);
306 static void hw_setup_clock_mode(const struct lantiq_ssc_spi
*spi
,
309 u32 con_set
= 0, con_clr
= 0;
312 * SPI mode mapping in CON register:
313 * Mode CPOL CPHA CON.PO CON.PH
320 con_clr
|= LTQ_SPI_CON_PH
;
322 con_set
|= LTQ_SPI_CON_PH
;
325 con_set
|= LTQ_SPI_CON_PO
| LTQ_SPI_CON_IDLE
;
327 con_clr
|= LTQ_SPI_CON_PO
| LTQ_SPI_CON_IDLE
;
329 /* Set heading control */
330 if (mode
& SPI_LSB_FIRST
)
331 con_clr
|= LTQ_SPI_CON_HB
;
333 con_set
|= LTQ_SPI_CON_HB
;
335 /* Set loopback mode */
337 con_set
|= LTQ_SPI_CON_LB
;
339 con_clr
|= LTQ_SPI_CON_LB
;
341 lantiq_ssc_maskl(spi
, con_clr
, con_set
, LTQ_SPI_CON
);
344 static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi
*spi
)
346 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
349 * Set clock divider for run mode to 1 to
350 * run at same frequency as FPI bus
352 lantiq_ssc_writel(spi
, 1 << LTQ_SPI_CLC_RMC_S
, LTQ_SPI_CLC
);
354 /* Put controller into config mode */
355 hw_enter_config_mode(spi
);
357 /* Clear error flags */
358 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS
, LTQ_SPI_WHBSTATE
);
360 /* Enable error checking, disable TX/RX */
361 lantiq_ssc_writel(spi
, LTQ_SPI_CON_RUEN
| LTQ_SPI_CON_AEN
|
362 LTQ_SPI_CON_TEN
| LTQ_SPI_CON_REN
| LTQ_SPI_CON_TXOFF
|
363 LTQ_SPI_CON_RXOFF
, LTQ_SPI_CON
);
365 /* Setup default SPI mode */
366 hw_setup_bits_per_word(spi
, spi
->bits_per_word
);
367 hw_setup_clock_mode(spi
, SPI_MODE_0
);
369 /* Enable master mode and clear error flags */
370 lantiq_ssc_writel(spi
, LTQ_SPI_WHBSTATE_SETMS
|
371 LTQ_SPI_WHBSTATE_CLR_ERRORS
,
374 /* Reset GPIO/CS registers */
375 lantiq_ssc_writel(spi
, 0, LTQ_SPI_GPOCON
);
376 lantiq_ssc_writel(spi
, 0xFF00, LTQ_SPI_FPGO
);
378 /* Enable and flush FIFOs */
382 /* Enable interrupts */
383 lantiq_ssc_writel(spi
, hwcfg
->irnen_t
| hwcfg
->irnen_r
|
384 LTQ_SPI_IRNEN_E
, LTQ_SPI_IRNEN
);
387 static int lantiq_ssc_setup(struct spi_device
*spidev
)
389 struct spi_master
*master
= spidev
->master
;
390 struct lantiq_ssc_spi
*spi
= spi_master_get_devdata(master
);
391 unsigned int cs
= spidev
->chip_select
;
394 /* GPIOs are used for CS */
395 if (spidev
->cs_gpiod
)
398 dev_dbg(spi
->dev
, "using internal chipselect %u\n", cs
);
400 if (cs
< spi
->base_cs
) {
402 "chipselect %i too small (min %i)\n", cs
, spi
->base_cs
);
406 /* set GPO pin to CS mode */
407 gpocon
= 1 << ((cs
- spi
->base_cs
) + LTQ_SPI_GPOCON_ISCSBN_S
);
410 if (spidev
->mode
& SPI_CS_HIGH
)
411 gpocon
|= 1 << (cs
- spi
->base_cs
);
413 lantiq_ssc_maskl(spi
, 0, gpocon
, LTQ_SPI_GPOCON
);
418 static int lantiq_ssc_prepare_message(struct spi_master
*master
,
419 struct spi_message
*message
)
421 struct lantiq_ssc_spi
*spi
= spi_master_get_devdata(master
);
423 hw_enter_config_mode(spi
);
424 hw_setup_clock_mode(spi
, message
->spi
->mode
);
425 hw_enter_active_mode(spi
);
430 static void hw_setup_transfer(struct lantiq_ssc_spi
*spi
,
431 struct spi_device
*spidev
, struct spi_transfer
*t
)
433 unsigned int speed_hz
= t
->speed_hz
;
434 unsigned int bits_per_word
= t
->bits_per_word
;
437 if (bits_per_word
!= spi
->bits_per_word
||
438 speed_hz
!= spi
->speed_hz
) {
439 hw_enter_config_mode(spi
);
440 hw_setup_speed_hz(spi
, speed_hz
);
441 hw_setup_bits_per_word(spi
, bits_per_word
);
442 hw_enter_active_mode(spi
);
444 spi
->speed_hz
= speed_hz
;
445 spi
->bits_per_word
= bits_per_word
;
448 /* Configure transmitter and receiver */
449 con
= lantiq_ssc_readl(spi
, LTQ_SPI_CON
);
451 con
&= ~LTQ_SPI_CON_TXOFF
;
453 con
|= LTQ_SPI_CON_TXOFF
;
456 con
&= ~LTQ_SPI_CON_RXOFF
;
458 con
|= LTQ_SPI_CON_RXOFF
;
460 lantiq_ssc_writel(spi
, con
, LTQ_SPI_CON
);
463 static int lantiq_ssc_unprepare_message(struct spi_master
*master
,
464 struct spi_message
*message
)
466 struct lantiq_ssc_spi
*spi
= spi_master_get_devdata(master
);
468 flush_workqueue(spi
->wq
);
470 /* Disable transmitter and receiver while idle */
471 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_CON_TXOFF
| LTQ_SPI_CON_RXOFF
,
477 static void tx_fifo_write(struct lantiq_ssc_spi
*spi
)
483 unsigned int tx_free
= tx_fifo_free(spi
);
485 spi
->fdx_tx_level
= 0;
486 while (spi
->tx_todo
&& tx_free
) {
487 switch (spi
->bits_per_word
) {
495 tx16
= (u16
*) spi
->tx
;
501 tx32
= (u32
*) spi
->tx
;
512 lantiq_ssc_writel(spi
, data
, LTQ_SPI_TB
);
518 static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi
*spi
)
524 unsigned int rx_fill
= rx_fifo_level(spi
);
527 * Wait until all expected data to be shifted in.
528 * Otherwise, rx overrun may occur.
530 while (rx_fill
!= spi
->fdx_tx_level
)
531 rx_fill
= rx_fifo_level(spi
);
534 data
= lantiq_ssc_readl(spi
, LTQ_SPI_RB
);
536 switch (spi
->bits_per_word
) {
544 rx16
= (u16
*) spi
->rx
;
550 rx32
= (u32
*) spi
->rx
;
564 static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi
*spi
)
568 unsigned int rxbv
, shift
;
569 unsigned int rx_fill
= rx_fifo_level(spi
);
572 * In RX-only mode the bits per word value is ignored by HW. A value
573 * of 32 is used instead. Thus all 4 bytes per FIFO must be read.
574 * If remaining RX bytes are less than 4, the FIFO must be read
575 * differently. The amount of received and valid bytes is indicated
576 * by STAT.RXBV register value.
579 if (spi
->rx_todo
< 4) {
580 rxbv
= (lantiq_ssc_readl(spi
, LTQ_SPI_STAT
) &
581 LTQ_SPI_STAT_RXBV_M
) >> LTQ_SPI_STAT_RXBV_S
;
582 data
= lantiq_ssc_readl(spi
, LTQ_SPI_RB
);
584 shift
= (rxbv
- 1) * 8;
588 *rx8
++ = (data
>> shift
) & 0xFF;
595 data
= lantiq_ssc_readl(spi
, LTQ_SPI_RB
);
596 rx32
= (u32
*) spi
->rx
;
606 static void rx_request(struct lantiq_ssc_spi
*spi
)
608 unsigned int rxreq
, rxreq_max
;
611 * To avoid receive overflows at high clocks it is better to request
612 * only the amount of bytes that fits into all FIFOs. This value
613 * depends on the FIFO size implemented in hardware.
615 rxreq
= spi
->rx_todo
;
616 rxreq_max
= spi
->rx_fifo_size
* 4;
617 if (rxreq
> rxreq_max
)
620 lantiq_ssc_writel(spi
, rxreq
, LTQ_SPI_RXREQ
);
623 static irqreturn_t
lantiq_ssc_xmit_interrupt(int irq
, void *data
)
625 struct lantiq_ssc_spi
*spi
= data
;
626 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
627 u32 val
= lantiq_ssc_readl(spi
, hwcfg
->irncr
);
629 spin_lock(&spi
->lock
);
631 lantiq_ssc_writel(spi
, val
, hwcfg
->irncr
);
634 if (spi
->rx
&& spi
->rx_todo
)
635 rx_fifo_read_full_duplex(spi
);
639 else if (!tx_fifo_level(spi
))
641 } else if (spi
->rx
) {
643 rx_fifo_read_half_duplex(spi
);
654 spin_unlock(&spi
->lock
);
658 queue_work(spi
->wq
, &spi
->work
);
659 spin_unlock(&spi
->lock
);
664 static irqreturn_t
lantiq_ssc_err_interrupt(int irq
, void *data
)
666 struct lantiq_ssc_spi
*spi
= data
;
667 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
668 u32 stat
= lantiq_ssc_readl(spi
, LTQ_SPI_STAT
);
669 u32 val
= lantiq_ssc_readl(spi
, hwcfg
->irncr
);
671 if (!(stat
& LTQ_SPI_STAT_ERRORS
))
674 spin_lock(&spi
->lock
);
676 lantiq_ssc_writel(spi
, val
, hwcfg
->irncr
);
678 if (stat
& LTQ_SPI_STAT_RUE
)
679 dev_err(spi
->dev
, "receive underflow error\n");
680 if (stat
& LTQ_SPI_STAT_TUE
)
681 dev_err(spi
->dev
, "transmit underflow error\n");
682 if (stat
& LTQ_SPI_STAT_AE
)
683 dev_err(spi
->dev
, "abort error\n");
684 if (stat
& LTQ_SPI_STAT_RE
)
685 dev_err(spi
->dev
, "receive overflow error\n");
686 if (stat
& LTQ_SPI_STAT_TE
)
687 dev_err(spi
->dev
, "transmit overflow error\n");
688 if (stat
& LTQ_SPI_STAT_ME
)
689 dev_err(spi
->dev
, "mode error\n");
691 /* Clear error flags */
692 lantiq_ssc_maskl(spi
, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS
, LTQ_SPI_WHBSTATE
);
694 /* set bad status so it can be retried */
695 if (spi
->master
->cur_msg
)
696 spi
->master
->cur_msg
->status
= -EIO
;
697 queue_work(spi
->wq
, &spi
->work
);
698 spin_unlock(&spi
->lock
);
703 static irqreturn_t
intel_lgm_ssc_isr(int irq
, void *data
)
705 struct lantiq_ssc_spi
*spi
= data
;
706 const struct lantiq_ssc_hwcfg
*hwcfg
= spi
->hwcfg
;
707 u32 val
= lantiq_ssc_readl(spi
, hwcfg
->irncr
);
709 if (!(val
& LTQ_SPI_IRNEN_ALL
))
712 if (val
& LTQ_SPI_IRNEN_E
)
713 return lantiq_ssc_err_interrupt(irq
, data
);
715 if ((val
& hwcfg
->irnen_t
) || (val
& hwcfg
->irnen_r
))
716 return lantiq_ssc_xmit_interrupt(irq
, data
);
721 static int transfer_start(struct lantiq_ssc_spi
*spi
, struct spi_device
*spidev
,
722 struct spi_transfer
*t
)
726 spin_lock_irqsave(&spi
->lock
, flags
);
732 spi
->tx_todo
= t
->len
;
734 /* initially fill TX FIFO */
739 spi
->rx_todo
= t
->len
;
741 /* start shift clock in RX-only mode */
746 spin_unlock_irqrestore(&spi
->lock
, flags
);
752 * The driver only gets an interrupt when the FIFO is empty, but there
753 * is an additional shift register from which the data is written to
754 * the wire. We get the last interrupt when the controller starts to
755 * write the last word to the wire, not when it is finished. Do busy
756 * waiting till it finishes.
758 static void lantiq_ssc_bussy_work(struct work_struct
*work
)
760 struct lantiq_ssc_spi
*spi
;
761 unsigned long long timeout
= 8LL * 1000LL;
764 spi
= container_of(work
, typeof(*spi
), work
);
766 do_div(timeout
, spi
->speed_hz
);
767 timeout
+= timeout
+ 100; /* some tolerance */
769 end
= jiffies
+ msecs_to_jiffies(timeout
);
771 u32 stat
= lantiq_ssc_readl(spi
, LTQ_SPI_STAT
);
773 if (!(stat
& LTQ_SPI_STAT_BSY
)) {
774 spi_finalize_current_transfer(spi
->master
);
779 } while (!time_after_eq(jiffies
, end
));
781 if (spi
->master
->cur_msg
)
782 spi
->master
->cur_msg
->status
= -EIO
;
783 spi_finalize_current_transfer(spi
->master
);
786 static void lantiq_ssc_handle_err(struct spi_master
*master
,
787 struct spi_message
*message
)
789 struct lantiq_ssc_spi
*spi
= spi_master_get_devdata(master
);
791 /* flush FIFOs on timeout */
796 static void lantiq_ssc_set_cs(struct spi_device
*spidev
, bool enable
)
798 struct lantiq_ssc_spi
*spi
= spi_master_get_devdata(spidev
->master
);
799 unsigned int cs
= spidev
->chip_select
;
802 if (!!(spidev
->mode
& SPI_CS_HIGH
) == enable
)
803 fgpo
= (1 << (cs
- spi
->base_cs
));
805 fgpo
= (1 << (cs
- spi
->base_cs
+ LTQ_SPI_FGPO_SETOUTN_S
));
807 lantiq_ssc_writel(spi
, fgpo
, LTQ_SPI_FPGO
);
810 static int lantiq_ssc_transfer_one(struct spi_master
*master
,
811 struct spi_device
*spidev
,
812 struct spi_transfer
*t
)
814 struct lantiq_ssc_spi
*spi
= spi_master_get_devdata(master
);
816 hw_setup_transfer(spi
, spidev
, t
);
818 return transfer_start(spi
, spidev
, t
);
821 static int intel_lgm_cfg_irq(struct platform_device
*pdev
, struct lantiq_ssc_spi
*spi
)
825 irq
= platform_get_irq(pdev
, 0);
829 return devm_request_irq(&pdev
->dev
, irq
, intel_lgm_ssc_isr
, 0, "spi", spi
);
832 static int lantiq_cfg_irq(struct platform_device
*pdev
, struct lantiq_ssc_spi
*spi
)
836 irq
= platform_get_irq_byname(pdev
, LTQ_SPI_RX_IRQ_NAME
);
840 err
= devm_request_irq(&pdev
->dev
, irq
, lantiq_ssc_xmit_interrupt
,
841 0, LTQ_SPI_RX_IRQ_NAME
, spi
);
845 irq
= platform_get_irq_byname(pdev
, LTQ_SPI_TX_IRQ_NAME
);
849 err
= devm_request_irq(&pdev
->dev
, irq
, lantiq_ssc_xmit_interrupt
,
850 0, LTQ_SPI_TX_IRQ_NAME
, spi
);
855 irq
= platform_get_irq_byname(pdev
, LTQ_SPI_ERR_IRQ_NAME
);
859 err
= devm_request_irq(&pdev
->dev
, irq
, lantiq_ssc_err_interrupt
,
860 0, LTQ_SPI_ERR_IRQ_NAME
, spi
);
864 static const struct lantiq_ssc_hwcfg lantiq_ssc_xway
= {
865 .cfg_irq
= lantiq_cfg_irq
,
866 .irnen_r
= LTQ_SPI_IRNEN_R_XWAY
,
867 .irnen_t
= LTQ_SPI_IRNEN_T_XWAY
,
870 .fifo_size_mask
= GENMASK(5, 0),
874 static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx
= {
875 .cfg_irq
= lantiq_cfg_irq
,
876 .irnen_r
= LTQ_SPI_IRNEN_R_XRX
,
877 .irnen_t
= LTQ_SPI_IRNEN_T_XRX
,
880 .fifo_size_mask
= GENMASK(5, 0),
884 static const struct lantiq_ssc_hwcfg intel_ssc_lgm
= {
885 .cfg_irq
= intel_lgm_cfg_irq
,
886 .irnen_r
= LTQ_SPI_IRNEN_R_XRX
,
887 .irnen_t
= LTQ_SPI_IRNEN_T_XRX
,
890 .fifo_size_mask
= GENMASK(7, 0),
894 static const struct of_device_id lantiq_ssc_match
[] = {
895 { .compatible
= "lantiq,ase-spi", .data
= &lantiq_ssc_xway
, },
896 { .compatible
= "lantiq,falcon-spi", .data
= &lantiq_ssc_xrx
, },
897 { .compatible
= "lantiq,xrx100-spi", .data
= &lantiq_ssc_xrx
, },
898 { .compatible
= "intel,lgm-spi", .data
= &intel_ssc_lgm
, },
901 MODULE_DEVICE_TABLE(of
, lantiq_ssc_match
);
903 static int lantiq_ssc_probe(struct platform_device
*pdev
)
905 struct device
*dev
= &pdev
->dev
;
906 struct spi_master
*master
;
907 struct lantiq_ssc_spi
*spi
;
908 const struct lantiq_ssc_hwcfg
*hwcfg
;
909 const struct of_device_id
*match
;
910 u32 id
, supports_dma
, revision
;
914 match
= of_match_device(lantiq_ssc_match
, dev
);
916 dev_err(dev
, "no device match\n");
921 master
= spi_alloc_master(dev
, sizeof(struct lantiq_ssc_spi
));
925 spi
= spi_master_get_devdata(master
);
926 spi
->master
= master
;
929 platform_set_drvdata(pdev
, spi
);
930 spi
->regbase
= devm_platform_ioremap_resource(pdev
, 0);
931 if (IS_ERR(spi
->regbase
)) {
932 err
= PTR_ERR(spi
->regbase
);
936 err
= hwcfg
->cfg_irq(pdev
, spi
);
940 spi
->spi_clk
= devm_clk_get(dev
, "gate");
941 if (IS_ERR(spi
->spi_clk
)) {
942 err
= PTR_ERR(spi
->spi_clk
);
945 err
= clk_prepare_enable(spi
->spi_clk
);
950 * Use the old clk_get_fpi() function on Lantiq platform, till it
951 * supports common clk.
953 #if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
954 spi
->fpi_clk
= clk_get_fpi();
956 spi
->fpi_clk
= clk_get(dev
, "freq");
958 if (IS_ERR(spi
->fpi_clk
)) {
959 err
= PTR_ERR(spi
->fpi_clk
);
960 goto err_clk_disable
;
964 of_property_read_u32(pdev
->dev
.of_node
, "num-cs", &num_cs
);
967 of_property_read_u32(pdev
->dev
.of_node
, "base-cs", &spi
->base_cs
);
969 spin_lock_init(&spi
->lock
);
970 spi
->bits_per_word
= 8;
973 master
->dev
.of_node
= pdev
->dev
.of_node
;
974 master
->num_chipselect
= num_cs
;
975 master
->use_gpio_descriptors
= true;
976 master
->setup
= lantiq_ssc_setup
;
977 master
->set_cs
= lantiq_ssc_set_cs
;
978 master
->handle_err
= lantiq_ssc_handle_err
;
979 master
->prepare_message
= lantiq_ssc_prepare_message
;
980 master
->unprepare_message
= lantiq_ssc_unprepare_message
;
981 master
->transfer_one
= lantiq_ssc_transfer_one
;
982 master
->mode_bits
= SPI_CPOL
| SPI_CPHA
| SPI_LSB_FIRST
| SPI_CS_HIGH
|
984 master
->bits_per_word_mask
= SPI_BPW_RANGE_MASK(2, 8) |
985 SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
987 spi
->wq
= alloc_ordered_workqueue(dev_name(dev
), WQ_MEM_RECLAIM
);
992 INIT_WORK(&spi
->work
, lantiq_ssc_bussy_work
);
994 id
= lantiq_ssc_readl(spi
, LTQ_SPI_ID
);
995 spi
->tx_fifo_size
= (id
>> LTQ_SPI_ID_TXFS_S
) & hwcfg
->fifo_size_mask
;
996 spi
->rx_fifo_size
= (id
>> LTQ_SPI_ID_RXFS_S
) & hwcfg
->fifo_size_mask
;
997 supports_dma
= (id
& LTQ_SPI_ID_CFG_M
) >> LTQ_SPI_ID_CFG_S
;
998 revision
= id
& LTQ_SPI_ID_REV_M
;
1000 lantiq_ssc_hw_init(spi
);
1003 "Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
1004 revision
, spi
->tx_fifo_size
, spi
->rx_fifo_size
, supports_dma
);
1006 err
= devm_spi_register_master(dev
, master
);
1008 dev_err(dev
, "failed to register spi_master\n");
1009 goto err_wq_destroy
;
1015 destroy_workqueue(spi
->wq
);
1017 clk_put(spi
->fpi_clk
);
1019 clk_disable_unprepare(spi
->spi_clk
);
1021 spi_master_put(master
);
1026 static int lantiq_ssc_remove(struct platform_device
*pdev
)
1028 struct lantiq_ssc_spi
*spi
= platform_get_drvdata(pdev
);
1030 lantiq_ssc_writel(spi
, 0, LTQ_SPI_IRNEN
);
1031 lantiq_ssc_writel(spi
, 0, LTQ_SPI_CLC
);
1034 hw_enter_config_mode(spi
);
1036 destroy_workqueue(spi
->wq
);
1037 clk_disable_unprepare(spi
->spi_clk
);
1038 clk_put(spi
->fpi_clk
);
1043 static struct platform_driver lantiq_ssc_driver
= {
1044 .probe
= lantiq_ssc_probe
,
1045 .remove
= lantiq_ssc_remove
,
1047 .name
= "spi-lantiq-ssc",
1048 .of_match_table
= lantiq_ssc_match
,
1051 module_platform_driver(lantiq_ssc_driver
);
1053 MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
1054 MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
1055 MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
1056 MODULE_LICENSE("GPL");
1057 MODULE_ALIAS("platform:spi-lantiq-ssc");