// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1	30

#define SPI_CFG1_GET_TICK_DLY_MASK	0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1	0xc0000000

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

#define PIN_MODE_CFG(x)			((x) / 2)
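/*
 * Note: PIN_MODE_CFG() maps a SPI-MEM bus width to the 2-bit pin-mode
 * field of SPI_CFG3_IPM_REG; with the supported widths, the integer
 * division gives 1 line -> 0, 2 lines (dual) -> 1, 4 lines (quad) -> 2.
 */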
86 #define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
87 #define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
88 #define SPI_CFG3_IPM_XMODE_EN BIT(4)
89 #define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
90 #define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
91 #define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
93 #define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
94 #define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
95 #define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
97 #define MT8173_SPI_MAX_PAD_SEL 3
99 #define MTK_SPI_PAUSE_INT_STATUS 0x2
101 #define MTK_SPI_MAX_FIFO_SIZE 32U
102 #define MTK_SPI_PACKET_SIZE 1024
103 #define MTK_SPI_IPM_PACKET_SIZE SZ_64K
104 #define MTK_SPI_IPM_PACKET_LOOP SZ_256
106 #define MTK_SPI_IDLE 0
107 #define MTK_SPI_PAUSED 1
109 #define MTK_SPI_32BITS_MASK (0xffffffff)
111 #define DMA_ADDR_EXT_BITS (36)
112 #define DMA_ADDR_DEF_BITS (32)
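/*
 * Controllers with dev_comp->dma_ext latch DMA address bits 35:32 in
 * SPI_TX_SRC_REG_64/SPI_RX_DST_REG_64, which is why a 36-bit DMA mask
 * is only requested when dma_ext is set (see mtk_spi_probe()).
 */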
/**
 * struct mtk_spi_compatible - device data structure
 * @need_pad_sel:	Enable pad (pins) selection in SPI controller
 * @must_tx:		Must explicitly send dummy TX bytes to do RX only transfer
 * @enhance_timing:	Enable adjusting cfg register to enhance time accuracy
 * @dma_ext:		DMA address extension supported
 * @no_need_unprepare:	Don't unprepare the SPI clk during runtime
 * @ipm_design:		Adjust/extend registers to support IPM design IP features
 */
struct mtk_spi_compatible {
	bool need_pad_sel;
	bool must_tx;
	bool enhance_timing;
	bool dma_ext;
	bool no_need_unprepare;
	bool ipm_design;
};

/**
 * struct mtk_spi - SPI driver instance
 * @base:		Start address of the SPI controller registers
 * @state:		SPI controller state
 * @pad_num:		Number of pad_sel entries
 * @pad_sel:		Groups of pins to select
 * @parent_clk:		Parent of sel_clk
 * @sel_clk:		SPI host mux clock
 * @spi_clk:		Peripheral clock
 * @spi_hclk:		AHB bus clock
 * @cur_transfer:	Currently processed SPI transfer
 * @xfer_len:		Number of bytes to transfer
 * @num_xfered:		Number of transferred bytes
 * @tx_sgl:		TX transfer scatterlist
 * @rx_sgl:		RX transfer scatterlist
 * @tx_sgl_len:		Size of TX DMA transfer
 * @rx_sgl_len:		Size of RX DMA transfer
 * @dev_comp:		Device data structure
 * @spi_clk_hz:		Current SPI clock in Hz
 * @spimem_done:	SPI-MEM operation completion
 * @use_spimem:		Enables SPI-MEM
 * @dev:		Device pointer
 * @tx_dma:		DMA start for SPI-MEM TX
 * @rx_dma:		DMA start for SPI-MEM RX
 */
struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	u32 spi_clk_hz;
	struct completion spimem_done;
	bool use_spimem;
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
		.data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
		.data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

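/*
 * Illustrative consumer snippet (a sketch only; the device tree binding
 * document is authoritative). The clock-names and the
 * "mediatek,pad-select" property correspond to what mtk_spi_probe()
 * requests below ("hclk" being optional):
 *
 *	compatible = "mediatek,mt8173-spi";
 *	clock-names = "parent-clk", "sel-clk", "spi-clk";
 *	mediatek,pad-select = <0>;
 */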
static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

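/*
 * Below, each CS delay is converted from nanoseconds to SPI clock ticks
 * as delay_ns * DIV_ROUND_UP(spi_clk_hz, 1 MHz) / 1000, and the result
 * minus one is written into the 8-bit (or, with enhance_timing, 16-bit)
 * setup/hold/idle fields.
 */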
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	struct spi_delay *cs_setup = &spi->cs_setup;
	struct spi_delay *cs_hold = &spi->cs_hold;
	struct spi_delay *cs_inactive = &spi->cs_inactive;
	u32 setup, hold, inactive;
	u32 reg_val;
	int delay;

	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		if (mdata->dev_comp->enhance_timing) {
			if (hold) {
				hold = min_t(u32, hold, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
			}
		} else {
			if (hold) {
				hold = min_t(u32, hold, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xff)
					    << SPI_CFG0_CS_SETUP_OFFSET);
			}
		}
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	return 0;
}

static int mtk_spi_hw_init(struct spi_controller *host,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the mlsbx and mlsbtx */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi_get_chipselect(spi, 0)],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);

	return 0;
}

static int mtk_spi_prepare_message(struct spi_controller *host,
				   struct spi_message *msg)
{
	return mtk_spi_hw_init(host, msg->spi);
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

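/*
 * The effective SCK frequency is spi_clk_hz divided by an integer
 * divider computed below; sck_time = (div + 1) / 2 ticks are spent in
 * each of the clock's high and low phases, so requested speeds are
 * effectively rounded down.
 */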
static void mtk_spi_prepare_transfer(struct spi_controller *host,
				     u32 speed_hz)
{
	u32 div, sck_time, reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (speed_hz < mdata->spi_clk_hz / 2)
		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}

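/*
 * A transfer is programmed as packet_loop packets of packet_size bytes
 * each; the per-packet limit is 1 KiB (64 KiB on the IPM design). On
 * the DMA path, mtk_spi_update_mdata_len() keeps xfer_len an exact
 * multiple of the packet size so packet_loop divides evenly.
 */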
static void mtk_spi_setup_packet(struct spi_controller *host)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->ipm_design)
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_controller *host)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

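/*
 * mult_delta is the residue of xfer_len beyond the largest whole
 * multiple of the packet size; the caller transfers the aligned part
 * first and carries the residue over to the next round.
 */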
static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
{
	u32 mult_delta = 0;

	if (mdata->dev_comp->ipm_design) {
		if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
	} else {
		if (xfer_len > MTK_SPI_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	}

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_controller *host)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_controller *host,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

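/*
 * FIFO (PIO) path: at most MTK_SPI_MAX_FIFO_SIZE (32) bytes are moved
 * per interrupt round; a trailing 1-3 byte remainder is packed into a
 * single 32-bit register write.
 */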
static int mtk_spi_fifo_transfer(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(host, xfer->speed_hz);
	mtk_spi_setup_packet(host);

	if (xfer->tx_buf) {
		cnt = xfer->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = xfer->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(host);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(host, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return 1;
}

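/*
 * On the IPM design the duplex mode must be programmed per transfer:
 * SPI_CFG3_IPM_HALF_DUPLEX_EN marks single-direction transfers, with
 * SPI_CFG3_IPM_HALF_DUPLEX_DIR selecting the RX direction.
 */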
static int mtk_spi_transfer_one(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	u32 reg_val = 0;

	/* prepare xfer direction and duplex mode */
	if (mdata->dev_comp->ipm_design) {
		if (!xfer->tx_buf || !xfer->rx_buf) {
			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
			if (xfer->rx_buf)
				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
		}
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
	}

	if (host->can_dma(host, spi, xfer))
		return mtk_spi_dma_transfer(host, spi, xfer);
	else
		return mtk_spi_fifo_transfer(host, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_controller *host,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi_get_csgpiod(spi, 0))
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi_get_csgpiod(spi, 0), 0);

	return 0;
}

static irqreturn_t mtk_spi_interrupt_thread(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = mdata->cur_transfer;

	if (!host->can_dma(host, NULL, xfer)) {
		if (xfer->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     xfer->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(xfer->rx_buf + (cnt * 4) + mdata->num_xfered,
				       &reg_val,
				       remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == xfer->len) {
			spi_finalize_current_transfer(host);
			return IRQ_HANDLED;
		}

		len = xfer->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(host);

		if (xfer->tx_buf) {
			cnt = mdata->xfer_len / 4;
			iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
				      xfer->tx_buf + mdata->num_xfered, cnt);

			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = 0;
				memcpy(&reg_val,
				       xfer->tx_buf + (cnt * 4) + mdata->num_xfered,
				       remainder);
				writel(reg_val, mdata->base + SPI_TX_DATA_REG);
			}
		}

		mtk_spi_enable_transfer(host);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		xfer->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		xfer->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(host);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return IRQ_HANDLED;
}

static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	u32 reg_val;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* SPI-MEM ops */
	if (mdata->use_spimem) {
		complete(&mdata->spimem_done);
		return IRQ_HANDLED;
	}

	return IRQ_WAKE_THREAD;
}

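/*
 * spi-mem path: clamp the data size so that opcode + address + dummy +
 * data fit in one MTK_SPI_IPM_PACKET_SIZE (64 KiB) packet, keeping the
 * data length 4-byte aligned for DMA.
 */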
static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
				      struct spi_mem_op *op)
{
	int opcode_len;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned. */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spi_mem_setup_dma_xfer(struct spi_controller *host,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	/*
	 * For each byte we wait for 8 cycles of the SPI clock.
	 * Since the speed is defined in Hz and we want milliseconds,
	 * we start from 8 * 1000.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* prevent a zero timeout for short transfers. */
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* 1s tolerance */

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int i, ret = 0;

	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->controller, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->controller, mem->spi->max_speed_hz);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			    SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->controller);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max_t(u32, tx_size, 32);

	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->controller, op);

	mtk_spi_enable_transfer(mem->spi->controller);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* spi disable dma */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}

static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct mtk_spi *mdata;
	int i, irq, ret, addr_bits;

	host = devm_spi_alloc_host(dev, sizeof(*mdata));
	if (!host)
		return dev_err_probe(dev, -ENOMEM, "failed to alloc spi host\n");

	host->auto_runtime_pm = true;
	host->dev.of_node = dev->of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	host->set_cs = mtk_spi_set_cs;
	host->prepare_message = mtk_spi_prepare_message;
	host->transfer_one = mtk_spi_transfer_one;
	host->can_dma = mtk_spi_can_dma;
	host->setup = mtk_spi_setup;
	host->set_cs_timing = mtk_spi_set_hw_cs_timing;
	host->use_gpio_descriptors = true;

	mdata = spi_controller_get_devdata(host);
	mdata->dev_comp = device_get_match_data(dev);

	if (mdata->dev_comp->enhance_timing)
		host->mode_bits |= SPI_CS_HIGH;

	if (mdata->dev_comp->must_tx)
		host->flags = SPI_CONTROLLER_MUST_TX;
	if (mdata->dev_comp->ipm_design)
		host->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL |
				   SPI_RX_QUAD | SPI_TX_QUAD;

	if (mdata->dev_comp->ipm_design) {
		mdata->dev = dev;
		host->mem_ops = &mtk_spi_mem_ops;
		init_completion(&mdata->spimem_done);
	}

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0)
			return dev_err_probe(dev, -EINVAL,
				"No 'mediatek,pad-select' property\n");

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
				return dev_err_probe(dev, -EINVAL,
						     "wrong pad-sel[%d]: %u\n",
						     i, mdata->pad_sel[i]);
		}
	}

	platform_set_drvdata(pdev, host);
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	if (mdata->dev_comp->ipm_design)
		dma_set_max_seg_size(dev, SZ_16M);
	else
		dma_set_max_seg_size(dev, SZ_256K);

	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
				     "failed to get parent-clk\n");

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to clk_set_parent\n");

	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to enable hclk\n");

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_hclk);
		return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != host->num_chipselect)
			return dev_err_probe(dev, -EINVAL,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, host->num_chipselect);

		if (!host->cs_gpiods && host->num_chipselect > 1)
			return dev_err_probe(dev, -EINVAL,
				"cs_gpios not specified and num_chipselect > 1\n");
	}

	if (mdata->dev_comp->dma_ext)
		addr_bits = DMA_ADDR_EXT_BITS;
	else
		addr_bits = DMA_ADDR_DEF_BITS;
	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
	if (ret)
		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
			   addr_bits, ret);

	ret = devm_request_threaded_irq(dev, irq, mtk_spi_interrupt,
					mtk_spi_interrupt_thread,
					IRQF_TRIGGER_NONE, dev_name(dev), host);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register irq\n");

	pm_runtime_enable(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "failed to register host\n");
	}

	return 0;
}

static void mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
		complete(&mdata->spimem_done);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_warn(&pdev->dev, "Failed to resume hardware (%pe)\n", ERR_PTR(ret));
	} else {
		/*
		 * If pm runtime resume failed, clks are disabled and
		 * unprepared. So don't access the hardware and skip clk
		 * unpreparing.
		 */
		mtk_spi_reset(mdata);

		if (mdata->dev_comp->no_need_unprepare) {
			clk_unprepare(mdata->spi_clk);
			clk_unprepare(mdata->spi_hclk);
		}
	}

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

*dev
)
1338 struct spi_controller
*host
= dev_get_drvdata(dev
);
1339 struct mtk_spi
*mdata
= spi_controller_get_devdata(host
);
1341 pinctrl_pm_select_default_state(dev
);
1343 if (!pm_runtime_suspended(dev
)) {
1344 ret
= clk_prepare_enable(mdata
->spi_clk
);
1346 dev_err(dev
, "failed to enable spi_clk (%d)\n", ret
);
1350 ret
= clk_prepare_enable(mdata
->spi_hclk
);
1352 dev_err(dev
, "failed to enable spi_hclk (%d)\n", ret
);
1353 clk_disable_unprepare(mdata
->spi_clk
);
1358 ret
= spi_controller_resume(host
);
1360 clk_disable_unprepare(mdata
->spi_clk
);
1361 clk_disable_unprepare(mdata
->spi_hclk
);
1366 #endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");