// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>
#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD			0x00
#define MTK_NOR_CMD_WRITE		BIT(4)
#define MTK_NOR_CMD_PROGRAM		BIT(2)
#define MTK_NOR_CMD_READ		BIT(0)
#define MTK_NOR_CMD_MASK		GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT		0x04
#define MTK_NOR_REG_RDATA		0x0c

#define MTK_NOR_REG_RADR0		0x10
#define MTK_NOR_REG_RADR(n)		(MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3		0xc8

#define MTK_NOR_REG_WDATA		0x1c

#define MTK_NOR_REG_PRGDATA0		0x20
#define MTK_NOR_REG_PRGDATA(n)		(MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX		5

#define MTK_NOR_REG_SHIFT0		0x38
#define MTK_NOR_REG_SHIFT(n)		(MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX		9

#define MTK_NOR_REG_CFG1		0x60
#define MTK_NOR_FAST_READ		BIT(0)

#define MTK_NOR_REG_CFG2		0x64
#define MTK_NOR_WR_CUSTOM_OP_EN		BIT(4)
#define MTK_NOR_WR_BUF_EN		BIT(0)

#define MTK_NOR_REG_PP_DATA		0x98

#define MTK_NOR_REG_IRQ_STAT		0xa8
#define MTK_NOR_REG_IRQ_EN		0xac
#define MTK_NOR_IRQ_DMA			BIT(7)
#define MTK_NOR_IRQ_MASK		GENMASK(7, 0)

#define MTK_NOR_REG_CFG3		0xb4
#define MTK_NOR_DISABLE_WREN		BIT(7)
#define MTK_NOR_DISABLE_SR_POLL		BIT(5)

#define MTK_NOR_REG_WP			0xc4
#define MTK_NOR_ENABLE_SF_CMD		0x30

#define MTK_NOR_REG_BUSCFG		0xcc
#define MTK_NOR_4B_ADDR			BIT(4)
#define MTK_NOR_QUAD_ADDR		BIT(3)
#define MTK_NOR_QUAD_READ		BIT(2)
#define MTK_NOR_DUAL_ADDR		BIT(1)
#define MTK_NOR_DUAL_READ		BIT(0)
#define MTK_NOR_BUS_MODE_MASK		GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL		0x718
#define MTK_NOR_DMA_START		BIT(0)

#define MTK_NOR_REG_DMA_FADR		0x71c
#define MTK_NOR_REG_DMA_DADR		0x720
#define MTK_NOR_REG_DMA_END_DADR	0x724

#define MTK_NOR_PRG_MAX_SIZE		6
// Reading DMA src/dst addresses have to be 16-byte aligned
#define MTK_NOR_DMA_ALIGN		16
#define MTK_NOR_DMA_ALIGN_MASK		(MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE		PAGE_SIZE
// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE			128

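// Convert a transfer length in SPI clock cycles into the microseconds it
// takes at the current bus frequency, rounding up. For example, assuming an
// illustrative 26 MHz clock, a 48-cycle command needs
// CLK_TO_US(sp, 48) = DIV_ROUND_UP(48, 26) = 2 us; the real rate is read
// from the "spi" clock at init time.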
#define CLK_TO_US(sp, clkcnt)		DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)

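// Driver-private state. "buffer" points at the 16-byte-aligned bounce
// buffer used for DMA reads into unaligned destinations; op_done is
// completed by the IRQ handler when a DMA read finishes.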
struct mtk_nor {
	struct spi_controller *ctlr;
	struct device *dev;
	void __iomem *base;
	u8 *buffer;
	struct clk *spi_clk;
	struct clk *ctlr_clk;
	unsigned int spi_freq;
	bool wbuf_en;
	bool has_irq;
	struct completion op_done;
};

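// Read-modify-write helper: clear the "clr" bits in a register, then set
// the "set" bits.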
static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
	u32 val = readl(sp->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, sp->base + reg);
}

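// Issue a command through MTK_NOR_REG_CMD and poll until the controller
// clears the command bit. "clk" is the expected length of the operation in
// SPI clock cycles and only scales the poll interval and timeout.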
static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}

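// Program the three low address bytes into RADR0..RADR2; a fourth byte
// goes to RADR3 and toggles 4-byte address mode in BUSCFG.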
static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}

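// Check whether an op is a read the controller can issue natively: plain
// read (0x03, no dummy clocks), fast read (0x0b, 8 dummy clocks), or a
// dual/quad read with the dummy-clock count the hardware expects for the
// given address width.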
static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

	if (op->dummy.buswidth)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
		if (op->addr.buswidth == 1)
			return dummy == 8;
		else if (op->addr.buswidth == 2)
			return dummy == 4;
		else if (op->addr.buswidth == 4)
			return dummy == 6;
	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
		if (op->cmd.opcode == 0x03)
			return dummy == 0;
		else if (op->cmd.opcode == 0x0b)
			return dummy == 8;
	}
	return false;
}

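// Clamp op->data.nbytes to what one hardware transaction can move: DMA
// reads to an aligned length (or to one byte / the bounce-buffer size when
// unaligned), page programs to the 128-byte buffer, everything else to
// what fits in the 6-byte PRG shift register.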
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	size_t len;

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;

			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			else if (!((ulong)(op->data.buf.in) &
				   MTK_NOR_DMA_ALIGN_MASK))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	len = MTK_NOR_PRG_MAX_SIZE - op->cmd.nbytes - op->addr.nbytes -
	      op->dummy.nbytes;
	if (op->data.nbytes > len)
		op->data.nbytes = len;

	return 0;
}

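// Only single-byte, 1-bit-wide, non-DTR opcodes are accepted. Reads and
// page programs are handled natively; any other op must fit through the
// PRG shift register, leaving room for its data bytes.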
static bool mtk_nor_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	size_t len;

	if (op->cmd.buswidth != 1)
		return false;

	/* DTR ops not supported. */
	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;
	if (op->cmd.nbytes != 1)
		return false;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op))
			return true;
		else if (op->data.dir == SPI_MEM_DATA_OUT)
			return (op->addr.buswidth == 1) &&
			       (op->dummy.buswidth == 0) &&
			       (op->data.buswidth == 1);
	}
	len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
	if ((len > MTK_NOR_PRG_MAX_SIZE) ||
	    ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE)))
		return false;
	return true;
}

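// Translate the op into BUSCFG bus-mode bits and, for dual/quad reads,
// stash the opcode in the PRGDATA slot the controller fetches it from;
// opcode 0x0b additionally selects fast-read mode in CFG1.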
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	} else {
		if (op->cmd.opcode == 0x0b)
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
		else
			mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
	}
	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}

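// DMA read: map the destination, program source/destination/end addresses,
// start the engine, then either sleep on the completion (IRQ available) or
// poll DMA_CTL until the start bit clears.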
static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
			    u8 *buffer)
{
	int ret = 0;
	ulong delay;
	u32 reg;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE);
	if (dma_mapping_error(sp->dev, dma_addr)) {
		dev_err(sp->dev, "failed to map dma buffer.\n");
		return -EINVAL;
	}

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);

	if (sp->has_irq) {
		if (!wait_for_completion_timeout(&sp->op_done,
						 (delay + 1) * 100))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 (delay + 1) * 100);
	}

	dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE);
	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}

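// DMA read through the aligned bounce buffer: round the length up to the
// DMA alignment, read into sp->buffer, then copy the requested bytes out
// to the caller.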
static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from,
			       unsigned int length, u8 *buffer)
{
	unsigned int rdlen;
	int ret;

	if (length & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = length;

	ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer);
	if (ret)
		return ret;

	memcpy(buffer, sp->buffer, length);

	return 0;
}

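// PIO read of a single byte through the RDATA register, used when the
// transfer is too small or too misaligned for DMA.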
static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);

	return ret;
}

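// The two helpers below toggle the controller's 128-byte program buffer
// and poll CFG2 until the hardware acknowledges the change, caching the
// state in sp->wbuf_en to skip redundant register accesses.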
static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (sp->wbuf_en)
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 val & MTK_NOR_WR_BUF_EN, 0, 10000);
	if (!ret)
		sp->wbuf_en = true;
	return ret;
}

static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (!sp->wbuf_en)
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
	if (!ret)
		sp->wbuf_en = false;
	return ret;
}

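// Page program, buffered variant: feed the data through PP_DATA in 32-bit
// little-endian chunks, then fire one WRITE command; the unbuffered
// variant below writes a single byte through WDATA instead.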
static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_nor_write_buffer_enable(sp);
	if (ret < 0)
		return ret;

	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
	}
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
}

static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
				 const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_nor_write_buffer_disable(sp);
	if (ret < 0)
		return ret;
	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}

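// exec_op dispatch: page programs take the buffered or unbuffered path
// depending on size; reads pick PIO, bounce-buffer DMA or direct DMA
// based on length and destination alignment.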
static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if ((op->data.nbytes == 0) ||
	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
		return -ENOTSUPP;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_nor_set_addr(sp, op);
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_nor_pp_buffered(sp, op);
		return mtk_nor_pp_unbuffered(sp, op);
	}

	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
		ret = mtk_nor_write_buffer_disable(sp);
		if (ret < 0)
			return ret;
		mtk_nor_setup_bus(sp, op);
		if (op->data.nbytes == 1) {
			mtk_nor_set_addr(sp, op);
			return mtk_nor_read_pio(sp, op);
		} else if (((ulong)(op->data.buf.in) &
			    MTK_NOR_DMA_ALIGN_MASK)) {
			return mtk_nor_read_bounce(sp, op->addr.val,
						   op->data.nbytes,
						   op->data.buf.in);
		} else {
			return mtk_nor_read_dma(sp, op->addr.val,
						op->data.nbytes,
						op->data.buf.in);
		}
	}

	return -ENOTSUPP;
}

static int mtk_nor_setup(struct spi_device *spi)
{
	struct mtk_nor *sp = spi_controller_get_devdata(spi->master);

	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
			sp->spi_freq);
		return -EINVAL;
	}
	spi->max_speed_hz = sp->spi_freq;

	return 0;
}

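// Generic transfers are bounced through the PRG shift register: TX bytes
// are written to PRGDATA5 downwards, one PROGRAM command shifts
// trx_len * 8 bits, and RX bytes are read back from the SHIFT registers.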
static int mtk_nor_transfer_one_message(struct spi_controller *master,
					struct spi_message *m)
{
	struct mtk_nor *sp = spi_controller_get_devdata(master);
	struct spi_transfer *t = NULL;
	unsigned long trx_len = 0;
	int stat = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	const u8 *txbuf;
	u8 *rxbuf;
	int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		txbuf = t->tx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			if (txbuf)
				writeb(txbuf[i], reg);
			else
				writeb(0, reg);
		}
		trx_len += t->len;
	}

	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
				trx_len * BITS_PER_BYTE);
	if (stat < 0)
		goto msg_done;

	reg_offset = trx_len - 1;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		rxbuf = t->rx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			if (rxbuf)
				rxbuf[i] = readb(reg);
		}
	}

	m->actual_length = trx_len;
msg_done:
	m->status = stat;
	spi_finalize_current_message(master);

	return 0;
}

static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
	clk_disable_unprepare(sp->spi_clk);
	clk_disable_unprepare(sp->ctlr_clk);
}

static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
	int ret;

	ret = clk_prepare_enable(sp->spi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sp->ctlr_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		return ret;
	}

	return 0;
}

static int mtk_nor_init(struct mtk_nor *sp)
{
	int ret;

	ret = mtk_nor_enable_clk(sp);
	if (ret)
		return ret;

	sp->spi_freq = clk_get_rate(sp->spi_clk);

	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);

	return ret;
}

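// The only interrupt used is DMA completion: ack the status bits, signal
// op_done and mask further interrupts until the next DMA read arms them
// again.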
static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
	struct mtk_nor *sp = data;
	u32 irq_status, irq_enabled;

	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
	// write status back to clear interrupt
	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

	if (!(irq_status & irq_enabled))
		return IRQ_NONE;

	if (irq_status & MTK_NOR_IRQ_DMA) {
		complete(&sp->op_done);
		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	}

	return IRQ_HANDLED;
}

static size_t mtk_max_msg_size(struct spi_device *spi)
{
	return MTK_NOR_PRG_MAX_SIZE;
}

static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
	.adjust_op_size = mtk_nor_adjust_op_size,
	.supports_op = mtk_nor_supports_op,
	.exec_op = mtk_nor_exec_op
};

static const struct of_device_id mtk_nor_match[] = {
	{ .compatible = "mediatek,mt8173-nor" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);

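// Probe: map MMIO, grab the "spi" and "sf" clocks, carve a 16-byte-aligned
// bounce buffer out of a devm allocation, register the spi-mem controller,
// and fall back to polling if no usable IRQ is found.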
static int mtk_nor_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;
	void __iomem *base;
	u8 *buffer;
	struct clk *spi_clk, *ctlr_clk;
	int ret, irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
	if (IS_ERR(ctlr_clk))
		return PTR_ERR(ctlr_clk);

	buffer = devm_kmalloc(&pdev->dev,
			      MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
			      GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
		buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
				~MTK_NOR_DMA_ALIGN_MASK);

	ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to allocate spi controller\n");
		return -ENOMEM;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->max_message_size = mtk_max_msg_size;
	ctlr->mem_ops = &mtk_nor_mem_ops;
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->num_chipselect = 1;
	ctlr->setup = mtk_nor_setup;
	ctlr->transfer_one_message = mtk_nor_transfer_one_message;

	dev_set_drvdata(&pdev->dev, ctlr);

	sp = spi_controller_get_devdata(ctlr);
	sp->base = base;
	sp->buffer = buffer;
	sp->has_irq = false;
	sp->wbuf_en = false;
	sp->ctlr = ctlr;
	sp->dev = &pdev->dev;
	sp->spi_clk = spi_clk;
	sp->ctlr_clk = ctlr_clk;

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		dev_warn(sp->dev, "IRQ not available.");
	} else {
		writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT);
		writel(0, base + MTK_NOR_REG_IRQ_EN);
		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
				       pdev->name, sp);
		if (ret < 0) {
			dev_warn(sp->dev, "failed to request IRQ.");
		} else {
			init_completion(&sp->op_done);
			sp->has_irq = true;
		}
	}

	ret = mtk_nor_init(sp);
	if (ret < 0) {
		kfree(ctlr);
		return ret;
	}

	dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);

	return devm_spi_register_controller(&pdev->dev, ctlr);
}

static int mtk_nor_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;

	ctlr = dev_get_drvdata(&pdev->dev);
	sp = spi_controller_get_devdata(ctlr);

	mtk_nor_disable_clk(sp);

	return 0;
}

static struct platform_driver mtk_nor_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mtk_nor_match,
	},
	.probe = mtk_nor_probe,
	.remove = mtk_nor_remove,
};

module_platform_driver(mtk_nor_driver);

MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);