// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD 0x00
#define MTK_NOR_CMD_WRITE BIT(4)
#define MTK_NOR_CMD_PROGRAM BIT(2)
#define MTK_NOR_CMD_READ BIT(0)
#define MTK_NOR_CMD_MASK GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT 0x04
#define MTK_NOR_PRG_CNT_MAX 56
#define MTK_NOR_REG_RDATA 0x0c

#define MTK_NOR_REG_RADR0 0x10
#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3 0xc8

#define MTK_NOR_REG_WDATA 0x1c

#define MTK_NOR_REG_PRGDATA0 0x20
#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX 5

#define MTK_NOR_REG_SHIFT0 0x38
#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX 9

#define MTK_NOR_REG_CFG1 0x60
#define MTK_NOR_FAST_READ BIT(0)

#define MTK_NOR_REG_CFG2 0x64
#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
#define MTK_NOR_WR_BUF_EN BIT(0)

#define MTK_NOR_REG_PP_DATA 0x98

#define MTK_NOR_REG_IRQ_STAT 0xa8
#define MTK_NOR_REG_IRQ_EN 0xac
#define MTK_NOR_IRQ_DMA BIT(7)
#define MTK_NOR_IRQ_MASK GENMASK(7, 0)

#define MTK_NOR_REG_CFG3 0xb4
#define MTK_NOR_DISABLE_WREN BIT(7)
#define MTK_NOR_DISABLE_SR_POLL BIT(5)

#define MTK_NOR_REG_WP 0xc4
#define MTK_NOR_ENABLE_SF_CMD 0x30

#define MTK_NOR_REG_BUSCFG 0xcc
#define MTK_NOR_4B_ADDR BIT(4)
#define MTK_NOR_QUAD_ADDR BIT(3)
#define MTK_NOR_QUAD_READ BIT(2)
#define MTK_NOR_DUAL_ADDR BIT(1)
#define MTK_NOR_DUAL_READ BIT(0)
#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL 0x718
#define MTK_NOR_DMA_START BIT(0)

#define MTK_NOR_REG_DMA_FADR 0x71c
#define MTK_NOR_REG_DMA_DADR 0x720
#define MTK_NOR_REG_DMA_END_DADR 0x724
#define MTK_NOR_REG_DMA_DADR_HB 0x738
#define MTK_NOR_REG_DMA_END_DADR_HB 0x73c

#define MTK_NOR_PRG_MAX_SIZE 6
// Reading DMA src/dst addresses have to be 16-byte aligned
#define MTK_NOR_DMA_ALIGN 16
#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE 128

#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
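
// CLK_TO_US() converts a SPI clock-cycle count into microseconds, rounding
// up. As a worked example (26 MHz is illustrative only): at spi_freq =
// 26 MHz, CLK_TO_US(sp, 48) = DIV_ROUND_UP(48, 26) = 2 us.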
struct mtk_nor {
	struct spi_controller *ctlr;
	struct device *dev;
	void __iomem *base;
	u8 *buffer;
	dma_addr_t buffer_dma;
	struct clk *spi_clk;
	struct clk *ctlr_clk;
	struct clk *axi_clk;
	unsigned int spi_freq;
	bool wbuf_en;
	bool has_irq;
	bool high_dma;
	struct completion op_done;
};
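
// buffer/buffer_dma describe the bounce area used by mtk_nor_read_bounce()
// when the caller's read buffer is not MTK_NOR_DMA_ALIGN-aligned; op_done
// is completed by the IRQ handler when has_irq is set.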
static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
	u32 val = readl(sp->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, sp->base + reg);
}
static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}
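
// Note on the poll parameters above: the expected duration of the command
// is `delay` microseconds, so the busy bit is sampled every delay / 3 us
// and the poll gives up after roughly 200x the expected time.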
static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}
static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}
static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

	if (op->dummy.buswidth)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
		if (op->addr.buswidth == 1)
			return dummy == 8;
		else if (op->addr.buswidth == 2)
			return dummy == 4;
		else if (op->addr.buswidth == 4)
			return dummy == 6;
	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
		if (op->cmd.opcode == 0x03)
			return dummy == 0;
		else if (op->cmd.opcode == 0x0b)
			return dummy == 8;
	}
	return false;
}
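
// Layout assumed by the PRG path below: up to MTK_NOR_REG_PRGDATA_MAX + 1
// (6) PRGDATA registers shift bytes out, up to MTK_NOR_REG_SHIFT_MAX + 1
// (10) SHIFT registers capture bytes in, and the whole transaction is
// capped at MTK_NOR_PRG_CNT_MAX (56) bit clocks, i.e. 7 bytes on the wire.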
static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
	int tx_len, rx_len, prg_len, prg_left;

	// prg mode is spi-only.
	if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
	    (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
		return false;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		// count dummy bytes only if we need to write data after it
		tx_len += op->dummy.nbytes;

		// leave at least one byte for data
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
			return false;

		// if there's no addr, meaning adjust_op_size is impossible,
		// check data length as well.
		if ((!op->addr.nbytes) &&
		    (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
			return false;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
			return false;

		rx_len = op->data.nbytes;
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (rx_len > prg_left) {
			if (!op->addr.nbytes)
				return false;
			rx_len = prg_left;
		}

		prg_len = tx_len + op->dummy.nbytes + rx_len;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	} else {
		prg_len = tx_len + op->dummy.nbytes;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	}
	return true;
}
static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
{
	int tx_len, tx_left, prg_left;

	tx_len = op->cmd.nbytes + op->addr.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		tx_len += op->dummy.nbytes;
		tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
		if (op->data.nbytes > tx_left)
			op->data.nbytes = tx_left;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (op->data.nbytes > prg_left)
			op->data.nbytes = prg_left;
	}
}
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;

			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			else if (!need_bounce(sp, op))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	mtk_nor_adj_prg_size(op);
	return 0;
}
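
// mtk_nor_adjust_op_size() and mtk_nor_supports_op() form the spi-mem
// contract together with mtk_nor_exec_op(): supports_op filters ops the
// controller can do at all, adjust_op_size clamps the data length to what
// one hardware transaction can carry, and exec_op performs the clamped op.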
static bool mtk_nor_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->cmd.buswidth != 1)
		return false;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		switch (op->data.dir) {
		case SPI_MEM_DATA_IN:
			if (mtk_nor_match_read(op))
				return true;
			break;
		case SPI_MEM_DATA_OUT:
			if ((op->addr.buswidth == 1) &&
			    (op->dummy.nbytes == 0) &&
			    (op->data.buswidth == 1))
				return true;
			break;
		default:
			break;
		}
	}

	return mtk_nor_match_prg(op);
}
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		if (op->cmd.opcode == 0x0b)
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
		else
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
	}
	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}
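
// The writes to PRGDATA(4)/PRGDATA(3) above latch the read opcode for the
// quad/dual read paths; which PRGDATA register the controller samples
// appears to be mode-dependent, per the register usage in this function.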
static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
			    dma_addr_t dma_addr)
{
	int ret = 0;
	ulong delay;
	u32 reg;

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	if (sp->high_dma) {
		writel(upper_32_bits(dma_addr),
		       sp->base + MTK_NOR_REG_DMA_DADR_HB);
		writel(upper_32_bits(dma_addr + length),
		       sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
	}

	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);

	if (sp->has_irq) {
		if (!wait_for_completion_timeout(&sp->op_done,
						 (delay + 1) * 100))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 (delay + 1) * 100);
	}

	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}
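
// The timeout budget above assumes (length + 5) bytes on the wire; the
// five extra bytes presumably cover the opcode plus up to four address
// bytes sent before the data phase.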
static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	unsigned int rdlen;
	int ret;

	if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = op->data.nbytes;

	ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);

	if (!ret)
		memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);

	return ret;
}
static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int ret;
	dma_addr_t dma_addr;

	if (need_bounce(sp, op))
		return mtk_nor_read_bounce(sp, op);

	dma_addr = dma_map_single(sp->dev, op->data.buf.in,
				  op->data.nbytes, DMA_FROM_DEVICE);

	if (dma_mapping_error(sp->dev, dma_addr))
		return -EINVAL;

	ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);

	dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

	return ret;
}
static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
	return ret;
}
static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (sp->wbuf_en)
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 val & MTK_NOR_WR_BUF_EN, 0, 10000);
	if (!ret)
		sp->wbuf_en = true;
	return ret;
}
static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (!sp->wbuf_en)
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
	if (!ret)
		sp->wbuf_en = false;
	return ret;
}
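
// wbuf_en caches the current MTK_NOR_WR_BUF_EN state so that back-to-back
// page programs don't pay the CFG2 poll in the two helpers above.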
static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_nor_write_buffer_enable(sp);
	if (ret < 0)
		return ret;

	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
	}
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
}
static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
				 const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_nor_write_buffer_disable(sp);
	if (ret < 0)
		return ret;
	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}
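
// exec_op() picks between the two program paths above: the buffered path
// moves a full MTK_NOR_PP_SIZE (128-byte) page through MTK_NOR_REG_PP_DATA,
// while the unbuffered path writes a single byte via MTK_NOR_REG_WDATA.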
static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int rx_len = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	int tx_len, prg_len;
	int i, ret;
	void __iomem *reg;
	u8 bufbyte;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	// count dummy bytes only if we need to write data after it
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_len += op->dummy.nbytes + op->data.nbytes;
	else if (op->data.dir == SPI_MEM_DATA_IN)
		rx_len = op->data.nbytes;

	prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
		  op->data.nbytes;

	// an invalid op may reach here if the caller calls exec_op without
	// adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
	// spi-mem won't try this op again with generic spi transfers.
	if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
	    (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
	    (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
		return -EINVAL;

	// fill tx data
	for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(0, reg);
		}

		for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(((const u8 *)(op->data.buf.out))[i], reg);
		}
	}

	for (; reg_offset >= 0; reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		writeb(0, reg);
	}

	// trigger op
	writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
			       prg_len * BITS_PER_BYTE);
	if (ret)
		return ret;

	// fetch read data
	reg_offset = 0;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			((u8 *)(op->data.buf.in))[i] = readb(reg);
		}
	}

	return 0;
}
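
// Byte ordering in mtk_nor_spi_mem_prg(): TX bytes are written MSB-first
// from PRGDATA(MTK_NOR_REG_PRGDATA_MAX) downwards, and RX bytes are read
// back from the SHIFT registers in reverse, so SHIFT0 holds the last byte
// received.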
static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if ((op->data.nbytes == 0) ||
	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
		return mtk_nor_spi_mem_prg(sp, op);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_nor_set_addr(sp, op);
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_nor_pp_buffered(sp, op);
		return mtk_nor_pp_unbuffered(sp, op);
	}

	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
		ret = mtk_nor_write_buffer_disable(sp);
		if (ret < 0)
			return ret;
		mtk_nor_setup_bus(sp, op);
		if (op->data.nbytes == 1) {
			mtk_nor_set_addr(sp, op);
			return mtk_nor_read_pio(sp, op);
		} else {
			return mtk_nor_read_dma(sp, op);
		}
	}

	return mtk_nor_spi_mem_prg(sp, op);
}
static int mtk_nor_setup(struct spi_device *spi)
{
	struct mtk_nor *sp = spi_controller_get_devdata(spi->master);

	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
			sp->spi_freq);
		return -EINVAL;
	}
	spi->max_speed_hz = sp->spi_freq;

	return 0;
}
static int mtk_nor_transfer_one_message(struct spi_controller *master,
					struct spi_message *m)
{
	struct mtk_nor *sp = spi_controller_get_devdata(master);
	struct spi_transfer *t = NULL;
	unsigned long trx_len = 0;
	int stat = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	const u8 *txbuf;
	u8 *rxbuf;
	int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		txbuf = t->tx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			if (txbuf)
				writeb(txbuf[i], reg);
			else
				writeb(0, reg);
		}
		trx_len += t->len;
	}

	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
				trx_len * BITS_PER_BYTE);
	if (stat < 0)
		goto msg_done;

	reg_offset = trx_len - 1;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		rxbuf = t->rx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			if (rxbuf)
				rxbuf[i] = readb(reg);
		}
	}

	m->actual_length = trx_len;
msg_done:
	m->status = stat;
	spi_finalize_current_message(master);

	return 0;
}
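
// This transfer_one_message() implementation reuses the PRG shift
// registers, so it can only handle tiny messages; mtk_max_msg_size() below
// caps them at MTK_NOR_PRG_MAX_SIZE (6) bytes accordingly.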
static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
	clk_disable_unprepare(sp->spi_clk);
	clk_disable_unprepare(sp->ctlr_clk);
	clk_disable_unprepare(sp->axi_clk);
}
static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
	int ret;

	ret = clk_prepare_enable(sp->spi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sp->ctlr_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		return ret;
	}

	ret = clk_prepare_enable(sp->axi_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		clk_disable_unprepare(sp->ctlr_clk);
		return ret;
	}

	return 0;
}
static void mtk_nor_init(struct mtk_nor *sp)
{
	writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);

	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
}
static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
	struct mtk_nor *sp = data;
	u32 irq_status, irq_enabled;

	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
	// write status back to clear interrupt
	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

	if (!(irq_status & irq_enabled))
		return IRQ_NONE;

	if (irq_status & MTK_NOR_IRQ_DMA) {
		complete(&sp->op_done);
		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	}

	return IRQ_HANDLED;
}
static size_t mtk_max_msg_size(struct spi_device *spi)
{
	return MTK_NOR_PRG_MAX_SIZE;
}
static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
	.adjust_op_size = mtk_nor_adjust_op_size,
	.supports_op = mtk_nor_supports_op,
	.exec_op = mtk_nor_exec_op
};
[] = {
747 { .compatible
= "mediatek,mt8192-nor", .data
= (void *)36 },
748 { .compatible
= "mediatek,mt8173-nor", .data
= (void *)32 },
751 MODULE_DEVICE_TABLE(of
, mtk_nor_match
);
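
// The .data field encodes each SoC's DMA addressing width; probe() feeds
// it to DMA_BIT_MASK() and uses "> 32" to decide whether the high 32-bit
// DMA address registers must be programmed.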
static int mtk_nor_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;
	void __iomem *base;
	struct clk *spi_clk, *ctlr_clk, *axi_clk;
	int ret, irq;
	unsigned long dma_bits;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
	if (IS_ERR(ctlr_clk))
		return PTR_ERR(ctlr_clk);

	axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
	if (IS_ERR(axi_clk))
		return PTR_ERR(axi_clk);

	dma_bits = (unsigned long)of_device_get_match_data(&pdev->dev);
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
		dev_err(&pdev->dev, "failed to set dma mask(%lu)\n", dma_bits);
		return -EINVAL;
	}

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to allocate spi controller\n");
		return -ENOMEM;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->max_message_size = mtk_max_msg_size;
	ctlr->mem_ops = &mtk_nor_mem_ops;
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->num_chipselect = 1;
	ctlr->setup = mtk_nor_setup;
	ctlr->transfer_one_message = mtk_nor_transfer_one_message;
	ctlr->auto_runtime_pm = true;

	dev_set_drvdata(&pdev->dev, ctlr);

	sp = spi_controller_get_devdata(ctlr);
	sp->base = base;
	sp->has_irq = false;
	sp->wbuf_en = false;
	sp->ctlr = ctlr;
	sp->dev = &pdev->dev;
	sp->spi_clk = spi_clk;
	sp->ctlr_clk = ctlr_clk;
	sp->axi_clk = axi_clk;
	sp->high_dma = (dma_bits > 32);
	sp->buffer = dmam_alloc_coherent(&pdev->dev,
				MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
				&sp->buffer_dma, GFP_KERNEL);
	if (!sp->buffer)
		return -ENOMEM;

	if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
		dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
		return -ENOMEM;
	}

	ret = mtk_nor_enable_clk(sp);
	if (ret < 0)
		return ret;

	sp->spi_freq = clk_get_rate(sp->spi_clk);

	mtk_nor_init(sp);

	irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		dev_warn(sp->dev, "IRQ not available.");
	} else {
		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
				       pdev->name, sp);
		if (ret < 0) {
			dev_warn(sp->dev, "failed to request IRQ.");
		} else {
			init_completion(&sp->op_done);
			sp->has_irq = true;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0)
		goto err_probe;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);

	return 0;

err_probe:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return ret;
}
static int mtk_nor_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return 0;
}
static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	return mtk_nor_enable_clk(sp);
}

static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused mtk_nor_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
static const struct dev_pm_ops mtk_nor_pm_ops = {
	SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
			   mtk_nor_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
};
static struct platform_driver mtk_nor_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mtk_nor_match,
		.pm = &mtk_nor_pm_ops,
	},
	.probe = mtk_nor_probe,
	.remove = mtk_nor_remove,
};
);
933 MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
934 MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
935 MODULE_LICENSE("GPL v2");
936 MODULE_ALIAS("platform:" DRIVER_NAME
);