// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD			0x00
#define MTK_NOR_CMD_WRITE		BIT(4)
#define MTK_NOR_CMD_PROGRAM		BIT(2)
#define MTK_NOR_CMD_READ		BIT(0)
#define MTK_NOR_CMD_MASK		GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT		0x04
#define MTK_NOR_PRG_CNT_MAX		56
#define MTK_NOR_REG_RDATA		0x0c

#define MTK_NOR_REG_RADR0		0x10
#define MTK_NOR_REG_RADR(n)		(MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3		0xc8

#define MTK_NOR_REG_WDATA		0x1c

#define MTK_NOR_REG_PRGDATA0		0x20
#define MTK_NOR_REG_PRGDATA(n)		(MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX		5

#define MTK_NOR_REG_SHIFT0		0x38
#define MTK_NOR_REG_SHIFT(n)		(MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX		9

#define MTK_NOR_REG_CFG1		0x60
#define MTK_NOR_FAST_READ		BIT(0)

#define MTK_NOR_REG_CFG2		0x64
#define MTK_NOR_WR_CUSTOM_OP_EN		BIT(4)
#define MTK_NOR_WR_BUF_EN		BIT(0)

#define MTK_NOR_REG_PP_DATA		0x98

#define MTK_NOR_REG_IRQ_STAT		0xa8
#define MTK_NOR_REG_IRQ_EN		0xac
#define MTK_NOR_IRQ_DMA			BIT(7)
#define MTK_NOR_IRQ_MASK		GENMASK(7, 0)

#define MTK_NOR_REG_CFG3		0xb4
#define MTK_NOR_DISABLE_WREN		BIT(7)
#define MTK_NOR_DISABLE_SR_POLL		BIT(5)

#define MTK_NOR_REG_WP			0xc4
#define MTK_NOR_ENABLE_SF_CMD		0x30

#define MTK_NOR_REG_BUSCFG		0xcc
#define MTK_NOR_4B_ADDR			BIT(4)
#define MTK_NOR_QUAD_ADDR		BIT(3)
#define MTK_NOR_QUAD_READ		BIT(2)
#define MTK_NOR_DUAL_ADDR		BIT(1)
#define MTK_NOR_DUAL_READ		BIT(0)
#define MTK_NOR_BUS_MODE_MASK		GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL		0x718
#define MTK_NOR_DMA_START		BIT(0)

#define MTK_NOR_REG_DMA_FADR		0x71c
#define MTK_NOR_REG_DMA_DADR		0x720
#define MTK_NOR_REG_DMA_END_DADR	0x724
#define MTK_NOR_REG_CG_DIS		0x728
#define MTK_NOR_SFC_SW_RST		BIT(2)

#define MTK_NOR_REG_DMA_DADR_HB		0x738
#define MTK_NOR_REG_DMA_END_DADR_HB	0x73c
#define MTK_NOR_PRG_MAX_SIZE		6

// DMA read src/dst addresses have to be 16-byte aligned,
#define MTK_NOR_DMA_ALIGN		16
#define MTK_NOR_DMA_ALIGN_MASK		(MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if the destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE		PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE			128

#define CLK_TO_US(sp, clkcnt)		DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
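
/*
 * Note: CLK_TO_US converts a cycle count on the SPI clock into
 * microseconds, rounding up. The division truncates spi_freq to whole
 * MHz first, so this assumes the SPI clock runs at 1 MHz or more.
 */
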
struct mtk_nor_caps {
	u8 dma_bits;

	/* extra_dummy_bit is added for the IP of newer SoCs.
	 * Some newer SoCs modify the timing of fetching register values
	 * and IDs of NOR flash; they need an extra_dummy_bit which can add
	 * more clock cycles for fetching data.
	 */
	u8 extra_dummy_bit;
};

struct mtk_nor {
	struct spi_controller *ctlr;
	struct device *dev;
	void __iomem *base;
	u8 *buffer;
	dma_addr_t buffer_dma;
	struct clk *spi_clk;
	struct clk *ctlr_clk;
	struct clk *axi_clk;
	struct clk *axi_s_clk;
	unsigned int spi_freq;
	bool wbuf_en;
	bool has_irq;
	bool high_dma;
	struct completion op_done;
	const struct mtk_nor_caps *caps;
};

static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
	u32 val = readl(sp->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, sp->base + reg);
}

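/*
 * Issue a command by setting its bit in the CMD register, then poll
 * until the controller clears the bit again. Poll interval and timeout
 * are derived from the expected length in SPI clock cycles.
 */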
static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}

static void mtk_nor_reset(struct mtk_nor *sp)
{
	mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, 0, MTK_NOR_SFC_SW_RST);
	mb(); /* flush previous writes */
	mtk_nor_rmw(sp, MTK_NOR_REG_CG_DIS, MTK_NOR_SFC_SW_RST, 0);
	mb(); /* flush previous writes */
	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
}

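/*
 * The flash address is latched into byte-wide RADR registers, least
 * significant byte first. The fourth byte lives at a separate offset
 * (RADR3) and is written only for 4-byte addressing, together with the
 * 4B mode bit in BUSCFG.
 */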
static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}

static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}

static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

	if (op->dummy.nbytes)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
		if (op->addr.buswidth == 1)
			return dummy == 8;
		else if (op->addr.buswidth == 2)
			return dummy == 4;
		else if (op->addr.buswidth == 4)
			return dummy == 6;
	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
		if (op->cmd.opcode == 0x03)
			return dummy == 0;
		else if (op->cmd.opcode == 0x0b)
			return dummy == 8;
	}
	return false;
}

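/*
 * PRG mode shifts an arbitrary single-lane transaction through the
 * PRGDATA/SHIFT register files: at most 6 tx bytes
 * (MTK_NOR_REG_PRGDATA_MAX + 1), at most 10 rx bytes
 * (MTK_NOR_REG_SHIFT_MAX + 1), and at most MTK_NOR_PRG_CNT_MAX (56)
 * bit clocks for the whole op.
 */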
static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
	int tx_len, rx_len, prg_len, prg_left;

	// prg mode is spi-only.
	if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
	    (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
		return false;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		// count dummy bytes only if we need to write data after it
		tx_len += op->dummy.nbytes;

		// leave at least one byte for data
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
			return false;

		// if there's no addr, meaning adjust_op_size is impossible,
		// check data length as well.
		if ((!op->addr.nbytes) &&
		    (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
			return false;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
			return false;

		rx_len = op->data.nbytes;
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (rx_len > prg_left) {
			if (!op->addr.nbytes)
				return false;
			rx_len = prg_left;
		}

		prg_len = tx_len + op->dummy.nbytes + rx_len;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	} else {
		prg_len = tx_len + op->dummy.nbytes;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	}
	return true;
}

static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
{
	int tx_len, tx_left, prg_left;

	tx_len = op->cmd.nbytes + op->addr.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		tx_len += op->dummy.nbytes;
		tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
		if (op->data.nbytes > tx_left)
			op->data.nbytes = tx_left;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (op->data.nbytes > prg_left)
			op->data.nbytes = prg_left;
	}
}

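/*
 * Clamp an op to what one hardware transaction can carry: DMA reads to
 * 4 MiB (or the bounce buffer size for misaligned buffers), page
 * programs to the 128-byte write buffer, everything else to the PRG
 * shift-register limits above.
 */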
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->controller);

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;

			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			else if (!need_bounce(sp, op))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	mtk_nor_adj_prg_size(op);
	return 0;
}

static bool mtk_nor_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->cmd.buswidth != 1)
		return false;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		switch (op->data.dir) {
		case SPI_MEM_DATA_IN:
			if (mtk_nor_match_read(op))
				return true;
			break;
		case SPI_MEM_DATA_OUT:
			if ((op->addr.buswidth == 1) &&
			    (op->dummy.nbytes == 0) &&
			    (op->data.buswidth == 1))
				return true;
			break;
		default:
			break;
		}
	}

	return mtk_nor_match_prg(op);
}

static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		if (op->cmd.opcode == 0x0b)
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
		else
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
	}
	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}

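/*
 * Program one DMA read from flash offset "from" into "dma_addr" and
 * wait for completion, via the DMA interrupt when available or by
 * polling the DMA_START bit otherwise. The timeout scales with the
 * transfer length at the current SPI clock.
 */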
static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
			    dma_addr_t dma_addr)
{
	int ret = 0;
	u32 delay, timeout;
	u32 reg;

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	if (sp->high_dma) {
		writel(upper_32_bits(dma_addr),
		       sp->base + MTK_NOR_REG_DMA_DADR_HB);
		writel(upper_32_bits(dma_addr + length),
		       sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
	}

	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
	timeout = (delay + 1) * 100;

	if (sp->has_irq) {
		if (!wait_for_completion_timeout(&sp->op_done,
						 usecs_to_jiffies(max(timeout, 10000U))))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 timeout);
	}

	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}

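/*
 * Reads into buffers that aren't 16-byte aligned go through the
 * driver's bounce buffer: the length is rounded up to the DMA
 * alignment and only the requested bytes are copied out afterwards.
 */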
static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	unsigned int rdlen;
	int ret;

	if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = op->data.nbytes;

	ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);

	if (!ret)
		memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);

	return ret;
}

static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int ret;
	dma_addr_t dma_addr;

	if (need_bounce(sp, op))
		return mtk_nor_read_bounce(sp, op);

	dma_addr = dma_map_single(sp->dev, op->data.buf.in,
				  op->data.nbytes, DMA_FROM_DEVICE);

	if (dma_mapping_error(sp->dev, dma_addr))
		return -EINVAL;

	ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);

	dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

	return ret;
}

static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
	return ret;
}

static int mtk_nor_setup_write_buffer(struct mtk_nor *sp, bool on)
{
	int ret;
	u32 val;

	if (!(sp->wbuf_en ^ on))
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	if (on) {
		writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
					 val & MTK_NOR_WR_BUF_EN, 0, 10000);
	} else {
		writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
					 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
	}

	if (!ret)
		sp->wbuf_en = on;

	return ret;
}

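/*
 * Buffered page program feeds the controller's 128-byte write buffer
 * through the 32-bit PP_DATA window, four bytes per write, then issues
 * a single WRITE command. exec_op only takes this path when the op is
 * exactly MTK_NOR_PP_SIZE bytes, so the 4-byte packing below cannot
 * read past the end of the buffer.
 */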
static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_nor_setup_write_buffer(sp, true);
	if (ret < 0)
		return ret;

	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
	}
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
}

static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
				 const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_nor_setup_write_buffer(sp, false);
	if (ret < 0)
		return ret;
	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}

static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int rx_len = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	int tx_len, prg_len;
	int i, ret;
	void __iomem *reg;
	u8 bufbyte;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	// count dummy bytes only if we need to write data after it
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_len += op->dummy.nbytes + op->data.nbytes;
	else if (op->data.dir == SPI_MEM_DATA_IN)
		rx_len = op->data.nbytes;

	prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
		  op->data.nbytes;

	// an invalid op may reach here if the caller calls exec_op without
	// adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
	// spi-mem won't try this op again with generic spi transfers.
	if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
	    (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
	    (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
		return -EINVAL;

	// fill tx data
	for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(0, reg);
		}

		for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(((const u8 *)(op->data.buf.out))[i], reg);
		}
	}

	for (; reg_offset >= 0; reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		writeb(0, reg);
	}

	// trigger op
	if (rx_len)
		writel(prg_len * BITS_PER_BYTE + sp->caps->extra_dummy_bit,
		       sp->base + MTK_NOR_REG_PRG_CNT);
	else
		writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
			       prg_len * BITS_PER_BYTE);
	if (ret)
		return ret;

	// fetch read data
	reg_offset = 0;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			((u8 *)(op->data.buf.in))[i] = readb(reg);
		}
	}

	return 0;
}

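/*
 * exec_op dispatch: 3/4-byte-address writes use the page program
 * paths, matching reads use DMA (or a single-byte PIO read), and
 * everything else falls back to generic PRG shifting. A failed DMA
 * read triggers one controller reset and retry.
 */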
static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->controller);
	int ret;

	if ((op->data.nbytes == 0) ||
	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
		return mtk_nor_spi_mem_prg(sp, op);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_nor_set_addr(sp, op);
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_nor_pp_buffered(sp, op);
		return mtk_nor_pp_unbuffered(sp, op);
	}

	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
		ret = mtk_nor_setup_write_buffer(sp, false);
		if (ret < 0)
			return ret;
		mtk_nor_setup_bus(sp, op);
		if (op->data.nbytes == 1) {
			mtk_nor_set_addr(sp, op);
			return mtk_nor_read_pio(sp, op);
		}

		ret = mtk_nor_read_dma(sp, op);
		if (unlikely(ret)) {
			/* Handle rare bus glitch */
			mtk_nor_reset(sp);
			mtk_nor_setup_bus(sp, op);
			return mtk_nor_read_dma(sp, op);
		}

		return ret;
	}

	return mtk_nor_spi_mem_prg(sp, op);
}

static int mtk_nor_setup(struct spi_device *spi)
{
	struct mtk_nor *sp = spi_controller_get_devdata(spi->controller);

	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
			sp->spi_freq);
		return -EINVAL;
	}
	spi->max_speed_hz = sp->spi_freq;

	return 0;
}

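/*
 * Generic (non-mem-op) messages are shifted through the same PRG
 * registers, so a whole message is limited to MTK_NOR_PRG_MAX_SIZE
 * bytes (see mtk_max_msg_size() below): tx bytes are loaded into the
 * PRGDATA registers and rx bytes read back from the SHIFT registers.
 */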
static int mtk_nor_transfer_one_message(struct spi_controller *host,
					struct spi_message *m)
{
	struct mtk_nor *sp = spi_controller_get_devdata(host);
	struct spi_transfer *t = NULL;
	unsigned long trx_len = 0;
	int stat = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	const u8 *txbuf;
	u8 *rxbuf;
	int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		txbuf = t->tx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			if (txbuf)
				writeb(txbuf[i], reg);
			else
				writeb(0, reg);
		}
		trx_len += t->len;
	}

	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
				trx_len * BITS_PER_BYTE);
	if (stat < 0)
		goto msg_done;

	reg_offset = trx_len - 1;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		rxbuf = t->rx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			if (rxbuf)
				rxbuf[i] = readb(reg);
		}
	}

	m->actual_length = trx_len;
msg_done:
	m->status = stat;
	spi_finalize_current_message(host);

	return 0;
}

static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
	clk_disable_unprepare(sp->spi_clk);
	clk_disable_unprepare(sp->ctlr_clk);
	clk_disable_unprepare(sp->axi_clk);
	clk_disable_unprepare(sp->axi_s_clk);
}

static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
	int ret;

	ret = clk_prepare_enable(sp->spi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sp->ctlr_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		return ret;
	}

	ret = clk_prepare_enable(sp->axi_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		clk_disable_unprepare(sp->ctlr_clk);
		return ret;
	}

	ret = clk_prepare_enable(sp->axi_s_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		clk_disable_unprepare(sp->ctlr_clk);
		clk_disable_unprepare(sp->axi_clk);
		return ret;
	}

	return 0;
}

static void mtk_nor_init(struct mtk_nor *sp)
{
	writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);

	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
}

static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
	struct mtk_nor *sp = data;
	u32 irq_status, irq_enabled;

	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
	// write status back to clear interrupt
	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

	if (!(irq_status & irq_enabled))
		return IRQ_NONE;

	if (irq_status & MTK_NOR_IRQ_DMA) {
		complete(&sp->op_done);
		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	}

	return IRQ_HANDLED;
}

static size_t mtk_max_msg_size(struct spi_device *spi)
{
	return MTK_NOR_PRG_MAX_SIZE;
}

static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
	.adjust_op_size = mtk_nor_adjust_op_size,
	.supports_op = mtk_nor_supports_op,
	.exec_op = mtk_nor_exec_op
};

static const struct mtk_nor_caps mtk_nor_caps_mt8173 = {
	.dma_bits = 32,
	.extra_dummy_bit = 0,
};

static const struct mtk_nor_caps mtk_nor_caps_mt8186 = {
	.dma_bits = 32,
	.extra_dummy_bit = 1,
};

static const struct mtk_nor_caps mtk_nor_caps_mt8192 = {
	.dma_bits = 36,
	.extra_dummy_bit = 0,
};

static const struct of_device_id mtk_nor_match[] = {
	{ .compatible = "mediatek,mt8173-nor", .data = &mtk_nor_caps_mt8173 },
	{ .compatible = "mediatek,mt8186-nor", .data = &mtk_nor_caps_mt8186 },
	{ .compatible = "mediatek,mt8192-nor", .data = &mtk_nor_caps_mt8192 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);

static int mtk_nor_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;
	struct mtk_nor_caps *caps;
	void __iomem *base;
	struct clk *spi_clk, *ctlr_clk, *axi_clk, *axi_s_clk;
	int ret, irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
	if (IS_ERR(ctlr_clk))
		return PTR_ERR(ctlr_clk);

	axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
	if (IS_ERR(axi_clk))
		return PTR_ERR(axi_clk);

	axi_s_clk = devm_clk_get_optional(&pdev->dev, "axi_s");
	if (IS_ERR(axi_s_clk))
		return PTR_ERR(axi_s_clk);

	caps = (struct mtk_nor_caps *)of_device_get_match_data(&pdev->dev);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(caps->dma_bits));
	if (ret) {
		dev_err(&pdev->dev, "failed to set dma mask(%u)\n", caps->dma_bits);
		return ret;
	}

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*sp));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to allocate spi controller\n");
		return -ENOMEM;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->max_message_size = mtk_max_msg_size;
	ctlr->mem_ops = &mtk_nor_mem_ops;
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->num_chipselect = 1;
	ctlr->setup = mtk_nor_setup;
	ctlr->transfer_one_message = mtk_nor_transfer_one_message;
	ctlr->auto_runtime_pm = true;

	dev_set_drvdata(&pdev->dev, ctlr);

	sp = spi_controller_get_devdata(ctlr);
	sp->base = base;
	sp->has_irq = false;
	sp->wbuf_en = false;
	sp->ctlr = ctlr;
	sp->dev = &pdev->dev;
	sp->spi_clk = spi_clk;
	sp->ctlr_clk = ctlr_clk;
	sp->axi_clk = axi_clk;
	sp->axi_s_clk = axi_s_clk;
	sp->caps = caps;
	sp->high_dma = caps->dma_bits > 32;
	sp->buffer = dmam_alloc_coherent(&pdev->dev,
				MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
				&sp->buffer_dma, GFP_KERNEL);
	if (!sp->buffer)
		return -ENOMEM;

	if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
		dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
		return -ENOMEM;
	}

	ret = mtk_nor_enable_clk(sp);
	if (ret < 0)
		return ret;

	sp->spi_freq = clk_get_rate(sp->spi_clk);

	mtk_nor_init(sp);

	irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		dev_warn(sp->dev, "IRQ not available.");
	} else {
		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
				       pdev->name, sp);
		if (ret < 0) {
			dev_warn(sp->dev, "failed to request IRQ.");
		} else {
			init_completion(&sp->op_done);
			sp->has_irq = true;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0)
		goto err_probe;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);

	return 0;

err_probe:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return ret;
}

static void mtk_nor_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);
}

static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	return mtk_nor_enable_clk(sp);
}

968 static int __maybe_unused
mtk_nor_suspend(struct device
*dev
)
970 return pm_runtime_force_suspend(dev
);
973 static int __maybe_unused
mtk_nor_resume(struct device
*dev
)
975 struct spi_controller
*ctlr
= dev_get_drvdata(dev
);
976 struct mtk_nor
*sp
= spi_controller_get_devdata(ctlr
);
979 ret
= pm_runtime_force_resume(dev
);
988 static const struct dev_pm_ops mtk_nor_pm_ops
= {
989 SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend
,
990 mtk_nor_runtime_resume
, NULL
)
991 SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend
, mtk_nor_resume
)
static struct platform_driver mtk_nor_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mtk_nor_match,
		.pm = &mtk_nor_pm_ops,
	},
	.probe = mtk_nor_probe,
	.remove = mtk_nor_remove,
};

module_platform_driver(mtk_nor_driver);

MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);