1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
4 * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
6 #include <linux/bitfield.h>
8 #include <linux/dmaengine.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/errno.h>
12 #include <linux/iopoll.h>
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/mutex.h>
17 #include <linux/of_device.h>
18 #include <linux/pinctrl/consumer.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/platform_device.h>
21 #include <linux/reset.h>
22 #include <linux/sizes.h>
23 #include <linux/spi/spi-mem.h>
/*
 * QUADSPI register map and bit definitions.
 * NOTE(review): register offsets (QSPI_CR, QSPI_SR, QSPI_DR, ...) and several
 * bits (CR_EN, SR_TEF, SR_TCF, SR_FTF) are referenced throughout this file but
 * were lost from this span; restored from the STM32 QUADSPI reference layout —
 * confirm against the datasheet (RM0390/RM0433 QUADSPI chapter).
 */
#define QSPI_CR			0x00
#define CR_EN			BIT(0)
#define CR_ABORT		BIT(1)
#define CR_DMAEN		BIT(2)
#define CR_TCEN			BIT(3)
#define CR_SSHIFT		BIT(4)
#define CR_FSEL			BIT(7)
#define CR_FTHRES_SHIFT		8
#define CR_TEIE			BIT(16)
#define CR_TCIE			BIT(17)
#define CR_FTIE			BIT(18)
#define CR_SMIE			BIT(19)
#define CR_TOIE			BIT(20)
#define CR_PRESC_MASK		GENMASK(31, 24)

#define QSPI_DCR		0x04
#define DCR_FSIZE_MASK		GENMASK(20, 16)

#define QSPI_SR			0x08
#define SR_TEF			BIT(0)
#define SR_TCF			BIT(1)
#define SR_FTF			BIT(2)
#define SR_SMF			BIT(3)
#define SR_TOF			BIT(4)
#define SR_BUSY			BIT(5)
#define SR_FLEVEL_MASK		GENMASK(13, 8)

#define QSPI_FCR		0x0c
#define FCR_CTEF		BIT(0)
#define FCR_CTCF		BIT(1)

#define QSPI_DLR		0x10

#define QSPI_CCR		0x14
#define CCR_INST_MASK		GENMASK(7, 0)
#define CCR_IMODE_MASK		GENMASK(9, 8)
#define CCR_ADMODE_MASK		GENMASK(11, 10)
#define CCR_ADSIZE_MASK		GENMASK(13, 12)
#define CCR_DCYC_MASK		GENMASK(22, 18)
#define CCR_DMODE_MASK		GENMASK(25, 24)
#define CCR_FMODE_MASK		GENMASK(27, 26)
#define CCR_FMODE_INDW		(0U << 26)	/* indirect write */
#define CCR_FMODE_INDR		(1U << 26)	/* indirect read */
#define CCR_FMODE_APM		(2U << 26)	/* automatic polling */
#define CCR_FMODE_MM		(3U << 26)	/* memory-mapped */
#define CCR_BUSWIDTH_0		0x0
#define CCR_BUSWIDTH_1		0x1
#define CCR_BUSWIDTH_2		0x2
#define CCR_BUSWIDTH_4		0x3

#define QSPI_AR			0x18
#define QSPI_DR			0x20
#define QSPI_PSMKR		0x24
#define QSPI_PSMAR		0x28
#define QSPI_LPTR		0x30

/* hardware limit: memory-mapped window / number of chip-selects */
#define STM32_QSPI_MAX_MMAP_SZ	SZ_256M
#define STM32_QSPI_MAX_NORCHIP	2

#define STM32_FIFO_TIMEOUT_US	30000
#define STM32_BUSY_TIMEOUT_US	100000
#define STM32_ABT_TIMEOUT_US	100000
#define STM32_COMP_TIMEOUT_MS	1000
#define STM32_AUTOSUSPEND_DELAY	-1
93 struct stm32_qspi_flash
{
94 struct stm32_qspi
*qspi
;
101 struct spi_controller
*ctrl
;
102 phys_addr_t phys_base
;
103 void __iomem
*io_base
;
104 void __iomem
*mm_base
;
105 resource_size_t mm_size
;
108 struct stm32_qspi_flash flash
[STM32_QSPI_MAX_NORCHIP
];
109 struct completion data_completion
;
112 struct dma_chan
*dma_chtx
;
113 struct dma_chan
*dma_chrx
;
114 struct completion dma_completion
;
120 * to protect device configuration, could be different between
121 * 2 flash access (bk1, bk2)
126 static irqreturn_t
stm32_qspi_irq(int irq
, void *dev_id
)
128 struct stm32_qspi
*qspi
= (struct stm32_qspi
*)dev_id
;
131 sr
= readl_relaxed(qspi
->io_base
+ QSPI_SR
);
133 if (sr
& (SR_TEF
| SR_TCF
)) {
135 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
);
136 cr
&= ~CR_TCIE
& ~CR_TEIE
;
137 writel_relaxed(cr
, qspi
->io_base
+ QSPI_CR
);
138 complete(&qspi
->data_completion
);
/* Pop one byte from the data FIFO into *val (PIO read path). */
static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
	*val = readb_relaxed(addr);
}
/* Push one byte from *val into the data FIFO (PIO write path). */
static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
	writeb_relaxed(*val, addr);
}
154 static int stm32_qspi_tx_poll(struct stm32_qspi
*qspi
,
155 const struct spi_mem_op
*op
)
157 void (*tx_fifo
)(u8
*val
, void __iomem
*addr
);
158 u32 len
= op
->data
.nbytes
, sr
;
162 if (op
->data
.dir
== SPI_MEM_DATA_IN
) {
163 tx_fifo
= stm32_qspi_read_fifo
;
164 buf
= op
->data
.buf
.in
;
167 tx_fifo
= stm32_qspi_write_fifo
;
168 buf
= (u8
*)op
->data
.buf
.out
;
172 ret
= readl_relaxed_poll_timeout_atomic(qspi
->io_base
+ QSPI_SR
,
173 sr
, (sr
& SR_FTF
), 1,
174 STM32_FIFO_TIMEOUT_US
);
176 dev_err(qspi
->dev
, "fifo timeout (len:%d stat:%#x)\n",
180 tx_fifo(buf
++, qspi
->io_base
+ QSPI_DR
);
186 static int stm32_qspi_tx_mm(struct stm32_qspi
*qspi
,
187 const struct spi_mem_op
*op
)
189 memcpy_fromio(op
->data
.buf
.in
, qspi
->mm_base
+ op
->addr
.val
,
/* dmaengine completion callback: arg is &qspi->dma_completion. */
static void stm32_qspi_dma_callback(void *arg)
{
	struct completion *dma_completion = arg;

	complete(dma_completion);
}
201 static int stm32_qspi_tx_dma(struct stm32_qspi
*qspi
,
202 const struct spi_mem_op
*op
)
204 struct dma_async_tx_descriptor
*desc
;
205 enum dma_transfer_direction dma_dir
;
206 struct dma_chan
*dma_ch
;
212 if (op
->data
.dir
== SPI_MEM_DATA_IN
) {
213 dma_dir
= DMA_DEV_TO_MEM
;
214 dma_ch
= qspi
->dma_chrx
;
216 dma_dir
= DMA_MEM_TO_DEV
;
217 dma_ch
= qspi
->dma_chtx
;
221 * spi_map_buf return -EINVAL if the buffer is not DMA-able
222 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
224 err
= spi_controller_dma_map_mem_op_data(qspi
->ctrl
, op
, &sgt
);
228 desc
= dmaengine_prep_slave_sg(dma_ch
, sgt
.sgl
, sgt
.nents
,
229 dma_dir
, DMA_PREP_INTERRUPT
);
235 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
);
237 reinit_completion(&qspi
->dma_completion
);
238 desc
->callback
= stm32_qspi_dma_callback
;
239 desc
->callback_param
= &qspi
->dma_completion
;
240 cookie
= dmaengine_submit(desc
);
241 err
= dma_submit_error(cookie
);
245 dma_async_issue_pending(dma_ch
);
247 writel_relaxed(cr
| CR_DMAEN
, qspi
->io_base
+ QSPI_CR
);
249 t_out
= sgt
.nents
* STM32_COMP_TIMEOUT_MS
;
250 if (!wait_for_completion_timeout(&qspi
->dma_completion
,
251 msecs_to_jiffies(t_out
)))
255 dmaengine_terminate_all(dma_ch
);
258 writel_relaxed(cr
& ~CR_DMAEN
, qspi
->io_base
+ QSPI_CR
);
260 spi_controller_dma_unmap_mem_op_data(qspi
->ctrl
, op
, &sgt
);
265 static int stm32_qspi_tx(struct stm32_qspi
*qspi
, const struct spi_mem_op
*op
)
267 if (!op
->data
.nbytes
)
270 if (qspi
->fmode
== CCR_FMODE_MM
)
271 return stm32_qspi_tx_mm(qspi
, op
);
272 else if ((op
->data
.dir
== SPI_MEM_DATA_IN
&& qspi
->dma_chrx
) ||
273 (op
->data
.dir
== SPI_MEM_DATA_OUT
&& qspi
->dma_chtx
))
274 if (!stm32_qspi_tx_dma(qspi
, op
))
277 return stm32_qspi_tx_poll(qspi
, op
);
280 static int stm32_qspi_wait_nobusy(struct stm32_qspi
*qspi
)
284 return readl_relaxed_poll_timeout_atomic(qspi
->io_base
+ QSPI_SR
, sr
,
286 STM32_BUSY_TIMEOUT_US
);
289 static int stm32_qspi_wait_cmd(struct stm32_qspi
*qspi
,
290 const struct spi_mem_op
*op
)
295 if (!op
->data
.nbytes
)
296 return stm32_qspi_wait_nobusy(qspi
);
298 if (readl_relaxed(qspi
->io_base
+ QSPI_SR
) & SR_TCF
)
301 reinit_completion(&qspi
->data_completion
);
302 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
);
303 writel_relaxed(cr
| CR_TCIE
| CR_TEIE
, qspi
->io_base
+ QSPI_CR
);
305 if (!wait_for_completion_timeout(&qspi
->data_completion
,
306 msecs_to_jiffies(STM32_COMP_TIMEOUT_MS
))) {
309 sr
= readl_relaxed(qspi
->io_base
+ QSPI_SR
);
316 writel_relaxed(FCR_CTCF
| FCR_CTEF
, qspi
->io_base
+ QSPI_FCR
);
321 static int stm32_qspi_get_mode(struct stm32_qspi
*qspi
, u8 buswidth
)
324 return CCR_BUSWIDTH_4
;
329 static int stm32_qspi_send(struct spi_mem
*mem
, const struct spi_mem_op
*op
)
331 struct stm32_qspi
*qspi
= spi_controller_get_devdata(mem
->spi
->master
);
332 struct stm32_qspi_flash
*flash
= &qspi
->flash
[mem
->spi
->chip_select
];
333 u32 ccr
, cr
, addr_max
;
334 int timeout
, err
= 0;
336 dev_dbg(qspi
->dev
, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
337 op
->cmd
.opcode
, op
->cmd
.buswidth
, op
->addr
.buswidth
,
338 op
->dummy
.buswidth
, op
->data
.buswidth
,
339 op
->addr
.val
, op
->data
.nbytes
);
341 err
= stm32_qspi_wait_nobusy(qspi
);
345 addr_max
= op
->addr
.val
+ op
->data
.nbytes
+ 1;
347 if (op
->data
.dir
== SPI_MEM_DATA_IN
) {
348 if (addr_max
< qspi
->mm_size
&&
350 qspi
->fmode
= CCR_FMODE_MM
;
352 qspi
->fmode
= CCR_FMODE_INDR
;
354 qspi
->fmode
= CCR_FMODE_INDW
;
357 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
);
358 cr
&= ~CR_PRESC_MASK
& ~CR_FSEL
;
359 cr
|= FIELD_PREP(CR_PRESC_MASK
, flash
->presc
);
360 cr
|= FIELD_PREP(CR_FSEL
, flash
->cs
);
361 writel_relaxed(cr
, qspi
->io_base
+ QSPI_CR
);
364 writel_relaxed(op
->data
.nbytes
- 1,
365 qspi
->io_base
+ QSPI_DLR
);
367 qspi
->fmode
= CCR_FMODE_INDW
;
370 ccr
|= FIELD_PREP(CCR_INST_MASK
, op
->cmd
.opcode
);
371 ccr
|= FIELD_PREP(CCR_IMODE_MASK
,
372 stm32_qspi_get_mode(qspi
, op
->cmd
.buswidth
));
374 if (op
->addr
.nbytes
) {
375 ccr
|= FIELD_PREP(CCR_ADMODE_MASK
,
376 stm32_qspi_get_mode(qspi
, op
->addr
.buswidth
));
377 ccr
|= FIELD_PREP(CCR_ADSIZE_MASK
, op
->addr
.nbytes
- 1);
380 if (op
->dummy
.buswidth
&& op
->dummy
.nbytes
)
381 ccr
|= FIELD_PREP(CCR_DCYC_MASK
,
382 op
->dummy
.nbytes
* 8 / op
->dummy
.buswidth
);
384 if (op
->data
.nbytes
) {
385 ccr
|= FIELD_PREP(CCR_DMODE_MASK
,
386 stm32_qspi_get_mode(qspi
, op
->data
.buswidth
));
389 writel_relaxed(ccr
, qspi
->io_base
+ QSPI_CCR
);
391 if (op
->addr
.nbytes
&& qspi
->fmode
!= CCR_FMODE_MM
)
392 writel_relaxed(op
->addr
.val
, qspi
->io_base
+ QSPI_AR
);
394 err
= stm32_qspi_tx(qspi
, op
);
399 * -read memory map: prefetching must be stopped if we read the last
400 * byte of device (device size - fifo size). like device size is not
401 * knows, the prefetching is always stop.
403 if (err
|| qspi
->fmode
== CCR_FMODE_MM
)
406 /* wait end of tx in indirect mode */
407 err
= stm32_qspi_wait_cmd(qspi
, op
);
414 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
) | CR_ABORT
;
415 writel_relaxed(cr
, qspi
->io_base
+ QSPI_CR
);
417 /* wait clear of abort bit by hw */
418 timeout
= readl_relaxed_poll_timeout_atomic(qspi
->io_base
+ QSPI_CR
,
419 cr
, !(cr
& CR_ABORT
), 1,
420 STM32_ABT_TIMEOUT_US
);
422 writel_relaxed(FCR_CTCF
, qspi
->io_base
+ QSPI_FCR
);
425 dev_err(qspi
->dev
, "%s err:%d abort timeout:%d\n",
426 __func__
, err
, timeout
);
431 static int stm32_qspi_exec_op(struct spi_mem
*mem
, const struct spi_mem_op
*op
)
433 struct stm32_qspi
*qspi
= spi_controller_get_devdata(mem
->spi
->master
);
436 ret
= pm_runtime_get_sync(qspi
->dev
);
440 mutex_lock(&qspi
->lock
);
441 ret
= stm32_qspi_send(mem
, op
);
442 mutex_unlock(&qspi
->lock
);
444 pm_runtime_mark_last_busy(qspi
->dev
);
445 pm_runtime_put_autosuspend(qspi
->dev
);
450 static int stm32_qspi_setup(struct spi_device
*spi
)
452 struct spi_controller
*ctrl
= spi
->master
;
453 struct stm32_qspi
*qspi
= spi_controller_get_devdata(ctrl
);
454 struct stm32_qspi_flash
*flash
;
461 if (!spi
->max_speed_hz
)
464 ret
= pm_runtime_get_sync(qspi
->dev
);
468 presc
= DIV_ROUND_UP(qspi
->clk_rate
, spi
->max_speed_hz
) - 1;
470 flash
= &qspi
->flash
[spi
->chip_select
];
472 flash
->cs
= spi
->chip_select
;
473 flash
->presc
= presc
;
475 mutex_lock(&qspi
->lock
);
476 qspi
->cr_reg
= 3 << CR_FTHRES_SHIFT
| CR_SSHIFT
| CR_EN
;
477 writel_relaxed(qspi
->cr_reg
, qspi
->io_base
+ QSPI_CR
);
479 /* set dcr fsize to max address */
480 qspi
->dcr_reg
= DCR_FSIZE_MASK
;
481 writel_relaxed(qspi
->dcr_reg
, qspi
->io_base
+ QSPI_DCR
);
482 mutex_unlock(&qspi
->lock
);
484 pm_runtime_mark_last_busy(qspi
->dev
);
485 pm_runtime_put_autosuspend(qspi
->dev
);
490 static int stm32_qspi_dma_setup(struct stm32_qspi
*qspi
)
492 struct dma_slave_config dma_cfg
;
493 struct device
*dev
= qspi
->dev
;
496 memset(&dma_cfg
, 0, sizeof(dma_cfg
));
498 dma_cfg
.src_addr_width
= DMA_SLAVE_BUSWIDTH_1_BYTE
;
499 dma_cfg
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_1_BYTE
;
500 dma_cfg
.src_addr
= qspi
->phys_base
+ QSPI_DR
;
501 dma_cfg
.dst_addr
= qspi
->phys_base
+ QSPI_DR
;
502 dma_cfg
.src_maxburst
= 4;
503 dma_cfg
.dst_maxburst
= 4;
505 qspi
->dma_chrx
= dma_request_chan(dev
, "rx");
506 if (IS_ERR(qspi
->dma_chrx
)) {
507 ret
= PTR_ERR(qspi
->dma_chrx
);
508 qspi
->dma_chrx
= NULL
;
509 if (ret
== -EPROBE_DEFER
)
512 if (dmaengine_slave_config(qspi
->dma_chrx
, &dma_cfg
)) {
513 dev_err(dev
, "dma rx config failed\n");
514 dma_release_channel(qspi
->dma_chrx
);
515 qspi
->dma_chrx
= NULL
;
519 qspi
->dma_chtx
= dma_request_chan(dev
, "tx");
520 if (IS_ERR(qspi
->dma_chtx
)) {
521 ret
= PTR_ERR(qspi
->dma_chtx
);
522 qspi
->dma_chtx
= NULL
;
524 if (dmaengine_slave_config(qspi
->dma_chtx
, &dma_cfg
)) {
525 dev_err(dev
, "dma tx config failed\n");
526 dma_release_channel(qspi
->dma_chtx
);
527 qspi
->dma_chtx
= NULL
;
532 init_completion(&qspi
->dma_completion
);
534 if (ret
!= -EPROBE_DEFER
)
540 static void stm32_qspi_dma_free(struct stm32_qspi
*qspi
)
543 dma_release_channel(qspi
->dma_chtx
);
545 dma_release_channel(qspi
->dma_chrx
);
/*
 * no special host constraint, so use default spi_mem_default_supports_op
 * to check supported mode.
 */
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
	.exec_op = stm32_qspi_exec_op,
};
556 static int stm32_qspi_probe(struct platform_device
*pdev
)
558 struct device
*dev
= &pdev
->dev
;
559 struct spi_controller
*ctrl
;
560 struct reset_control
*rstc
;
561 struct stm32_qspi
*qspi
;
562 struct resource
*res
;
565 ctrl
= spi_alloc_master(dev
, sizeof(*qspi
));
569 qspi
= spi_controller_get_devdata(ctrl
);
572 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "qspi");
573 qspi
->io_base
= devm_ioremap_resource(dev
, res
);
574 if (IS_ERR(qspi
->io_base
)) {
575 ret
= PTR_ERR(qspi
->io_base
);
579 qspi
->phys_base
= res
->start
;
581 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "qspi_mm");
582 qspi
->mm_base
= devm_ioremap_resource(dev
, res
);
583 if (IS_ERR(qspi
->mm_base
)) {
584 ret
= PTR_ERR(qspi
->mm_base
);
588 qspi
->mm_size
= resource_size(res
);
589 if (qspi
->mm_size
> STM32_QSPI_MAX_MMAP_SZ
) {
594 irq
= platform_get_irq(pdev
, 0);
600 ret
= devm_request_irq(dev
, irq
, stm32_qspi_irq
, 0,
601 dev_name(dev
), qspi
);
603 dev_err(dev
, "failed to request irq\n");
607 init_completion(&qspi
->data_completion
);
609 qspi
->clk
= devm_clk_get(dev
, NULL
);
610 if (IS_ERR(qspi
->clk
)) {
611 ret
= PTR_ERR(qspi
->clk
);
615 qspi
->clk_rate
= clk_get_rate(qspi
->clk
);
616 if (!qspi
->clk_rate
) {
621 ret
= clk_prepare_enable(qspi
->clk
);
623 dev_err(dev
, "can not enable the clock\n");
627 rstc
= devm_reset_control_get_exclusive(dev
, NULL
);
630 if (ret
== -EPROBE_DEFER
)
631 goto err_clk_disable
;
633 reset_control_assert(rstc
);
635 reset_control_deassert(rstc
);
639 platform_set_drvdata(pdev
, qspi
);
640 ret
= stm32_qspi_dma_setup(qspi
);
644 mutex_init(&qspi
->lock
);
646 ctrl
->mode_bits
= SPI_RX_DUAL
| SPI_RX_QUAD
647 | SPI_TX_DUAL
| SPI_TX_QUAD
;
648 ctrl
->setup
= stm32_qspi_setup
;
650 ctrl
->mem_ops
= &stm32_qspi_mem_ops
;
651 ctrl
->num_chipselect
= STM32_QSPI_MAX_NORCHIP
;
652 ctrl
->dev
.of_node
= dev
->of_node
;
654 pm_runtime_set_autosuspend_delay(dev
, STM32_AUTOSUSPEND_DELAY
);
655 pm_runtime_use_autosuspend(dev
);
656 pm_runtime_set_active(dev
);
657 pm_runtime_enable(dev
);
658 pm_runtime_get_noresume(dev
);
660 ret
= devm_spi_register_master(dev
, ctrl
);
662 goto err_pm_runtime_free
;
664 pm_runtime_mark_last_busy(dev
);
665 pm_runtime_put_autosuspend(dev
);
670 pm_runtime_get_sync(qspi
->dev
);
672 writel_relaxed(0, qspi
->io_base
+ QSPI_CR
);
673 mutex_destroy(&qspi
->lock
);
674 pm_runtime_put_noidle(qspi
->dev
);
675 pm_runtime_disable(qspi
->dev
);
676 pm_runtime_set_suspended(qspi
->dev
);
677 pm_runtime_dont_use_autosuspend(qspi
->dev
);
679 stm32_qspi_dma_free(qspi
);
681 clk_disable_unprepare(qspi
->clk
);
683 spi_master_put(qspi
->ctrl
);
688 static int stm32_qspi_remove(struct platform_device
*pdev
)
690 struct stm32_qspi
*qspi
= platform_get_drvdata(pdev
);
692 pm_runtime_get_sync(qspi
->dev
);
694 writel_relaxed(0, qspi
->io_base
+ QSPI_CR
);
695 stm32_qspi_dma_free(qspi
);
696 mutex_destroy(&qspi
->lock
);
697 pm_runtime_put_noidle(qspi
->dev
);
698 pm_runtime_disable(qspi
->dev
);
699 pm_runtime_set_suspended(qspi
->dev
);
700 pm_runtime_dont_use_autosuspend(qspi
->dev
);
701 clk_disable_unprepare(qspi
->clk
);
706 static int __maybe_unused
stm32_qspi_runtime_suspend(struct device
*dev
)
708 struct stm32_qspi
*qspi
= dev_get_drvdata(dev
);
710 clk_disable_unprepare(qspi
->clk
);
/* Runtime resume: re-enable the clock; propagate any clk error. */
static int __maybe_unused stm32_qspi_runtime_resume(struct device *dev)
{
	struct stm32_qspi *qspi = dev_get_drvdata(dev);

	return clk_prepare_enable(qspi->clk);
}
722 static int __maybe_unused
stm32_qspi_suspend(struct device
*dev
)
724 pinctrl_pm_select_sleep_state(dev
);
729 static int __maybe_unused
stm32_qspi_resume(struct device
*dev
)
731 struct stm32_qspi
*qspi
= dev_get_drvdata(dev
);
733 pinctrl_pm_select_default_state(dev
);
734 clk_prepare_enable(qspi
->clk
);
736 writel_relaxed(qspi
->cr_reg
, qspi
->io_base
+ QSPI_CR
);
737 writel_relaxed(qspi
->dcr_reg
, qspi
->io_base
+ QSPI_DCR
);
739 pm_runtime_mark_last_busy(qspi
->dev
);
740 pm_runtime_put_autosuspend(qspi
->dev
);
/* Runtime PM gates the clock; system PM additionally handles pinctrl state. */
static const struct dev_pm_ops stm32_qspi_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend,
			   stm32_qspi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend, stm32_qspi_resume)
};
751 static const struct of_device_id stm32_qspi_match
[] = {
752 {.compatible
= "st,stm32f469-qspi"},
755 MODULE_DEVICE_TABLE(of
, stm32_qspi_match
);
757 static struct platform_driver stm32_qspi_driver
= {
758 .probe
= stm32_qspi_probe
,
759 .remove
= stm32_qspi_remove
,
761 .name
= "stm32-qspi",
762 .of_match_table
= stm32_qspi_match
,
763 .pm
= &stm32_qspi_pm_ops
,
766 module_platform_driver(stm32_qspi_driver
);
768 MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
769 MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
770 MODULE_LICENSE("GPL v2");