// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>
27 #define CR_ABORT BIT(1)
28 #define CR_DMAEN BIT(2)
29 #define CR_TCEN BIT(3)
30 #define CR_SSHIFT BIT(4)
32 #define CR_FSEL BIT(7)
33 #define CR_FTHRES_SHIFT 8
34 #define CR_TEIE BIT(16)
35 #define CR_TCIE BIT(17)
36 #define CR_FTIE BIT(18)
37 #define CR_SMIE BIT(19)
38 #define CR_TOIE BIT(20)
39 #define CR_PRESC_MASK GENMASK(31, 24)
42 #define DCR_FSIZE_MASK GENMASK(20, 16)
50 #define SR_BUSY BIT(5)
51 #define SR_FLEVEL_MASK GENMASK(13, 8)
54 #define FCR_CTEF BIT(0)
55 #define FCR_CTCF BIT(1)
60 #define CCR_INST_MASK GENMASK(7, 0)
61 #define CCR_IMODE_MASK GENMASK(9, 8)
62 #define CCR_ADMODE_MASK GENMASK(11, 10)
63 #define CCR_ADSIZE_MASK GENMASK(13, 12)
64 #define CCR_DCYC_MASK GENMASK(22, 18)
65 #define CCR_DMODE_MASK GENMASK(25, 24)
66 #define CCR_FMODE_MASK GENMASK(27, 26)
67 #define CCR_FMODE_INDW (0U << 26)
68 #define CCR_FMODE_INDR (1U << 26)
69 #define CCR_FMODE_APM (2U << 26)
70 #define CCR_FMODE_MM (3U << 26)
71 #define CCR_BUSWIDTH_0 0x0
72 #define CCR_BUSWIDTH_1 0x1
73 #define CCR_BUSWIDTH_2 0x2
74 #define CCR_BUSWIDTH_4 0x3
79 #define QSPI_PSMKR 0x24
80 #define QSPI_PSMAR 0x28
82 #define QSPI_LPTR 0x30
84 #define STM32_QSPI_MAX_MMAP_SZ SZ_256M
85 #define STM32_QSPI_MAX_NORCHIP 2
87 #define STM32_FIFO_TIMEOUT_US 30000
88 #define STM32_BUSY_TIMEOUT_US 100000
89 #define STM32_ABT_TIMEOUT_US 100000
90 #define STM32_COMP_TIMEOUT_MS 1000
91 #define STM32_AUTOSUSPEND_DELAY -1
93 struct stm32_qspi_flash
{
94 struct stm32_qspi
*qspi
;
101 struct spi_controller
*ctrl
;
102 phys_addr_t phys_base
;
103 void __iomem
*io_base
;
104 void __iomem
*mm_base
;
105 resource_size_t mm_size
;
108 struct stm32_qspi_flash flash
[STM32_QSPI_MAX_NORCHIP
];
109 struct completion data_completion
;
112 struct dma_chan
*dma_chtx
;
113 struct dma_chan
*dma_chrx
;
114 struct completion dma_completion
;
120 * to protect device configuration, could be different between
121 * 2 flash access (bk1, bk2)
126 static irqreturn_t
stm32_qspi_irq(int irq
, void *dev_id
)
128 struct stm32_qspi
*qspi
= (struct stm32_qspi
*)dev_id
;
131 sr
= readl_relaxed(qspi
->io_base
+ QSPI_SR
);
133 if (sr
& (SR_TEF
| SR_TCF
)) {
135 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
);
136 cr
&= ~CR_TCIE
& ~CR_TEIE
;
137 writel_relaxed(cr
, qspi
->io_base
+ QSPI_CR
);
138 complete(&qspi
->data_completion
);
144 static void stm32_qspi_read_fifo(u8
*val
, void __iomem
*addr
)
146 *val
= readb_relaxed(addr
);
149 static void stm32_qspi_write_fifo(u8
*val
, void __iomem
*addr
)
151 writeb_relaxed(*val
, addr
);
154 static int stm32_qspi_tx_poll(struct stm32_qspi
*qspi
,
155 const struct spi_mem_op
*op
)
157 void (*tx_fifo
)(u8
*val
, void __iomem
*addr
);
158 u32 len
= op
->data
.nbytes
, sr
;
162 if (op
->data
.dir
== SPI_MEM_DATA_IN
) {
163 tx_fifo
= stm32_qspi_read_fifo
;
164 buf
= op
->data
.buf
.in
;
167 tx_fifo
= stm32_qspi_write_fifo
;
168 buf
= (u8
*)op
->data
.buf
.out
;
172 ret
= readl_relaxed_poll_timeout_atomic(qspi
->io_base
+ QSPI_SR
,
173 sr
, (sr
& SR_FTF
), 1,
174 STM32_FIFO_TIMEOUT_US
);
176 dev_err(qspi
->dev
, "fifo timeout (len:%d stat:%#x)\n",
180 tx_fifo(buf
++, qspi
->io_base
+ QSPI_DR
);
186 static int stm32_qspi_tx_mm(struct stm32_qspi
*qspi
,
187 const struct spi_mem_op
*op
)
189 memcpy_fromio(op
->data
.buf
.in
, qspi
->mm_base
+ op
->addr
.val
,
194 static void stm32_qspi_dma_callback(void *arg
)
196 struct completion
*dma_completion
= arg
;
198 complete(dma_completion
);
201 static int stm32_qspi_tx_dma(struct stm32_qspi
*qspi
,
202 const struct spi_mem_op
*op
)
204 struct dma_async_tx_descriptor
*desc
;
205 enum dma_transfer_direction dma_dir
;
206 struct dma_chan
*dma_ch
;
212 if (op
->data
.dir
== SPI_MEM_DATA_IN
) {
213 dma_dir
= DMA_DEV_TO_MEM
;
214 dma_ch
= qspi
->dma_chrx
;
216 dma_dir
= DMA_MEM_TO_DEV
;
217 dma_ch
= qspi
->dma_chtx
;
221 * spi_map_buf return -EINVAL if the buffer is not DMA-able
222 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
224 err
= spi_controller_dma_map_mem_op_data(qspi
->ctrl
, op
, &sgt
);
228 desc
= dmaengine_prep_slave_sg(dma_ch
, sgt
.sgl
, sgt
.nents
,
229 dma_dir
, DMA_PREP_INTERRUPT
);
235 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
);
237 reinit_completion(&qspi
->dma_completion
);
238 desc
->callback
= stm32_qspi_dma_callback
;
239 desc
->callback_param
= &qspi
->dma_completion
;
240 cookie
= dmaengine_submit(desc
);
241 err
= dma_submit_error(cookie
);
245 dma_async_issue_pending(dma_ch
);
247 writel_relaxed(cr
| CR_DMAEN
, qspi
->io_base
+ QSPI_CR
);
249 t_out
= sgt
.nents
* STM32_COMP_TIMEOUT_MS
;
250 if (!wait_for_completion_timeout(&qspi
->dma_completion
,
251 msecs_to_jiffies(t_out
)))
255 dmaengine_terminate_all(dma_ch
);
258 writel_relaxed(cr
& ~CR_DMAEN
, qspi
->io_base
+ QSPI_CR
);
260 spi_controller_dma_unmap_mem_op_data(qspi
->ctrl
, op
, &sgt
);
265 static int stm32_qspi_tx(struct stm32_qspi
*qspi
, const struct spi_mem_op
*op
)
267 if (!op
->data
.nbytes
)
270 if (qspi
->fmode
== CCR_FMODE_MM
)
271 return stm32_qspi_tx_mm(qspi
, op
);
272 else if ((op
->data
.dir
== SPI_MEM_DATA_IN
&& qspi
->dma_chrx
) ||
273 (op
->data
.dir
== SPI_MEM_DATA_OUT
&& qspi
->dma_chtx
))
274 if (!stm32_qspi_tx_dma(qspi
, op
))
277 return stm32_qspi_tx_poll(qspi
, op
);
280 static int stm32_qspi_wait_nobusy(struct stm32_qspi
*qspi
)
284 return readl_relaxed_poll_timeout_atomic(qspi
->io_base
+ QSPI_SR
, sr
,
286 STM32_BUSY_TIMEOUT_US
);
289 static int stm32_qspi_wait_cmd(struct stm32_qspi
*qspi
,
290 const struct spi_mem_op
*op
)
295 if (!op
->data
.nbytes
)
296 return stm32_qspi_wait_nobusy(qspi
);
298 if (readl_relaxed(qspi
->io_base
+ QSPI_SR
) & SR_TCF
)
301 reinit_completion(&qspi
->data_completion
);
302 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
);
303 writel_relaxed(cr
| CR_TCIE
| CR_TEIE
, qspi
->io_base
+ QSPI_CR
);
305 if (!wait_for_completion_timeout(&qspi
->data_completion
,
306 msecs_to_jiffies(STM32_COMP_TIMEOUT_MS
))) {
309 sr
= readl_relaxed(qspi
->io_base
+ QSPI_SR
);
316 writel_relaxed(FCR_CTCF
| FCR_CTEF
, qspi
->io_base
+ QSPI_FCR
);
321 static int stm32_qspi_get_mode(struct stm32_qspi
*qspi
, u8 buswidth
)
324 return CCR_BUSWIDTH_4
;
329 static int stm32_qspi_send(struct spi_mem
*mem
, const struct spi_mem_op
*op
)
331 struct stm32_qspi
*qspi
= spi_controller_get_devdata(mem
->spi
->master
);
332 struct stm32_qspi_flash
*flash
= &qspi
->flash
[mem
->spi
->chip_select
];
333 u32 ccr
, cr
, addr_max
;
334 int timeout
, err
= 0;
336 dev_dbg(qspi
->dev
, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
337 op
->cmd
.opcode
, op
->cmd
.buswidth
, op
->addr
.buswidth
,
338 op
->dummy
.buswidth
, op
->data
.buswidth
,
339 op
->addr
.val
, op
->data
.nbytes
);
341 err
= stm32_qspi_wait_nobusy(qspi
);
345 addr_max
= op
->addr
.val
+ op
->data
.nbytes
+ 1;
347 if (op
->data
.dir
== SPI_MEM_DATA_IN
) {
348 if (addr_max
< qspi
->mm_size
&&
350 qspi
->fmode
= CCR_FMODE_MM
;
352 qspi
->fmode
= CCR_FMODE_INDR
;
354 qspi
->fmode
= CCR_FMODE_INDW
;
357 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
);
358 cr
&= ~CR_PRESC_MASK
& ~CR_FSEL
;
359 cr
|= FIELD_PREP(CR_PRESC_MASK
, flash
->presc
);
360 cr
|= FIELD_PREP(CR_FSEL
, flash
->cs
);
361 writel_relaxed(cr
, qspi
->io_base
+ QSPI_CR
);
364 writel_relaxed(op
->data
.nbytes
- 1,
365 qspi
->io_base
+ QSPI_DLR
);
367 qspi
->fmode
= CCR_FMODE_INDW
;
370 ccr
|= FIELD_PREP(CCR_INST_MASK
, op
->cmd
.opcode
);
371 ccr
|= FIELD_PREP(CCR_IMODE_MASK
,
372 stm32_qspi_get_mode(qspi
, op
->cmd
.buswidth
));
374 if (op
->addr
.nbytes
) {
375 ccr
|= FIELD_PREP(CCR_ADMODE_MASK
,
376 stm32_qspi_get_mode(qspi
, op
->addr
.buswidth
));
377 ccr
|= FIELD_PREP(CCR_ADSIZE_MASK
, op
->addr
.nbytes
- 1);
380 if (op
->dummy
.buswidth
&& op
->dummy
.nbytes
)
381 ccr
|= FIELD_PREP(CCR_DCYC_MASK
,
382 op
->dummy
.nbytes
* 8 / op
->dummy
.buswidth
);
384 if (op
->data
.nbytes
) {
385 ccr
|= FIELD_PREP(CCR_DMODE_MASK
,
386 stm32_qspi_get_mode(qspi
, op
->data
.buswidth
));
389 writel_relaxed(ccr
, qspi
->io_base
+ QSPI_CCR
);
391 if (op
->addr
.nbytes
&& qspi
->fmode
!= CCR_FMODE_MM
)
392 writel_relaxed(op
->addr
.val
, qspi
->io_base
+ QSPI_AR
);
394 err
= stm32_qspi_tx(qspi
, op
);
399 * -read memory map: prefetching must be stopped if we read the last
400 * byte of device (device size - fifo size). like device size is not
401 * knows, the prefetching is always stop.
403 if (err
|| qspi
->fmode
== CCR_FMODE_MM
)
406 /* wait end of tx in indirect mode */
407 err
= stm32_qspi_wait_cmd(qspi
, op
);
414 cr
= readl_relaxed(qspi
->io_base
+ QSPI_CR
) | CR_ABORT
;
415 writel_relaxed(cr
, qspi
->io_base
+ QSPI_CR
);
417 /* wait clear of abort bit by hw */
418 timeout
= readl_relaxed_poll_timeout_atomic(qspi
->io_base
+ QSPI_CR
,
419 cr
, !(cr
& CR_ABORT
), 1,
420 STM32_ABT_TIMEOUT_US
);
422 writel_relaxed(FCR_CTCF
, qspi
->io_base
+ QSPI_FCR
);
425 dev_err(qspi
->dev
, "%s err:%d abort timeout:%d\n",
426 __func__
, err
, timeout
);
431 static int stm32_qspi_exec_op(struct spi_mem
*mem
, const struct spi_mem_op
*op
)
433 struct stm32_qspi
*qspi
= spi_controller_get_devdata(mem
->spi
->master
);
436 ret
= pm_runtime_get_sync(qspi
->dev
);
438 pm_runtime_put_noidle(qspi
->dev
);
442 mutex_lock(&qspi
->lock
);
443 ret
= stm32_qspi_send(mem
, op
);
444 mutex_unlock(&qspi
->lock
);
446 pm_runtime_mark_last_busy(qspi
->dev
);
447 pm_runtime_put_autosuspend(qspi
->dev
);
452 static int stm32_qspi_setup(struct spi_device
*spi
)
454 struct spi_controller
*ctrl
= spi
->master
;
455 struct stm32_qspi
*qspi
= spi_controller_get_devdata(ctrl
);
456 struct stm32_qspi_flash
*flash
;
463 if (!spi
->max_speed_hz
)
466 ret
= pm_runtime_get_sync(qspi
->dev
);
468 pm_runtime_put_noidle(qspi
->dev
);
472 presc
= DIV_ROUND_UP(qspi
->clk_rate
, spi
->max_speed_hz
) - 1;
474 flash
= &qspi
->flash
[spi
->chip_select
];
476 flash
->cs
= spi
->chip_select
;
477 flash
->presc
= presc
;
479 mutex_lock(&qspi
->lock
);
480 qspi
->cr_reg
= 3 << CR_FTHRES_SHIFT
| CR_SSHIFT
| CR_EN
;
481 writel_relaxed(qspi
->cr_reg
, qspi
->io_base
+ QSPI_CR
);
483 /* set dcr fsize to max address */
484 qspi
->dcr_reg
= DCR_FSIZE_MASK
;
485 writel_relaxed(qspi
->dcr_reg
, qspi
->io_base
+ QSPI_DCR
);
486 mutex_unlock(&qspi
->lock
);
488 pm_runtime_mark_last_busy(qspi
->dev
);
489 pm_runtime_put_autosuspend(qspi
->dev
);
494 static int stm32_qspi_dma_setup(struct stm32_qspi
*qspi
)
496 struct dma_slave_config dma_cfg
;
497 struct device
*dev
= qspi
->dev
;
500 memset(&dma_cfg
, 0, sizeof(dma_cfg
));
502 dma_cfg
.src_addr_width
= DMA_SLAVE_BUSWIDTH_1_BYTE
;
503 dma_cfg
.dst_addr_width
= DMA_SLAVE_BUSWIDTH_1_BYTE
;
504 dma_cfg
.src_addr
= qspi
->phys_base
+ QSPI_DR
;
505 dma_cfg
.dst_addr
= qspi
->phys_base
+ QSPI_DR
;
506 dma_cfg
.src_maxburst
= 4;
507 dma_cfg
.dst_maxburst
= 4;
509 qspi
->dma_chrx
= dma_request_chan(dev
, "rx");
510 if (IS_ERR(qspi
->dma_chrx
)) {
511 ret
= PTR_ERR(qspi
->dma_chrx
);
512 qspi
->dma_chrx
= NULL
;
513 if (ret
== -EPROBE_DEFER
)
516 if (dmaengine_slave_config(qspi
->dma_chrx
, &dma_cfg
)) {
517 dev_err(dev
, "dma rx config failed\n");
518 dma_release_channel(qspi
->dma_chrx
);
519 qspi
->dma_chrx
= NULL
;
523 qspi
->dma_chtx
= dma_request_chan(dev
, "tx");
524 if (IS_ERR(qspi
->dma_chtx
)) {
525 ret
= PTR_ERR(qspi
->dma_chtx
);
526 qspi
->dma_chtx
= NULL
;
528 if (dmaengine_slave_config(qspi
->dma_chtx
, &dma_cfg
)) {
529 dev_err(dev
, "dma tx config failed\n");
530 dma_release_channel(qspi
->dma_chtx
);
531 qspi
->dma_chtx
= NULL
;
536 init_completion(&qspi
->dma_completion
);
538 if (ret
!= -EPROBE_DEFER
)
544 static void stm32_qspi_dma_free(struct stm32_qspi
*qspi
)
547 dma_release_channel(qspi
->dma_chtx
);
549 dma_release_channel(qspi
->dma_chrx
);
553 * no special host constraint, so use default spi_mem_default_supports_op
554 * to check supported mode.
556 static const struct spi_controller_mem_ops stm32_qspi_mem_ops
= {
557 .exec_op
= stm32_qspi_exec_op
,
560 static int stm32_qspi_probe(struct platform_device
*pdev
)
562 struct device
*dev
= &pdev
->dev
;
563 struct spi_controller
*ctrl
;
564 struct reset_control
*rstc
;
565 struct stm32_qspi
*qspi
;
566 struct resource
*res
;
569 ctrl
= spi_alloc_master(dev
, sizeof(*qspi
));
573 qspi
= spi_controller_get_devdata(ctrl
);
576 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "qspi");
577 qspi
->io_base
= devm_ioremap_resource(dev
, res
);
578 if (IS_ERR(qspi
->io_base
)) {
579 ret
= PTR_ERR(qspi
->io_base
);
583 qspi
->phys_base
= res
->start
;
585 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "qspi_mm");
586 qspi
->mm_base
= devm_ioremap_resource(dev
, res
);
587 if (IS_ERR(qspi
->mm_base
)) {
588 ret
= PTR_ERR(qspi
->mm_base
);
592 qspi
->mm_size
= resource_size(res
);
593 if (qspi
->mm_size
> STM32_QSPI_MAX_MMAP_SZ
) {
598 irq
= platform_get_irq(pdev
, 0);
604 ret
= devm_request_irq(dev
, irq
, stm32_qspi_irq
, 0,
605 dev_name(dev
), qspi
);
607 dev_err(dev
, "failed to request irq\n");
611 init_completion(&qspi
->data_completion
);
613 qspi
->clk
= devm_clk_get(dev
, NULL
);
614 if (IS_ERR(qspi
->clk
)) {
615 ret
= PTR_ERR(qspi
->clk
);
619 qspi
->clk_rate
= clk_get_rate(qspi
->clk
);
620 if (!qspi
->clk_rate
) {
625 ret
= clk_prepare_enable(qspi
->clk
);
627 dev_err(dev
, "can not enable the clock\n");
631 rstc
= devm_reset_control_get_exclusive(dev
, NULL
);
634 if (ret
== -EPROBE_DEFER
)
635 goto err_clk_disable
;
637 reset_control_assert(rstc
);
639 reset_control_deassert(rstc
);
643 platform_set_drvdata(pdev
, qspi
);
644 ret
= stm32_qspi_dma_setup(qspi
);
648 mutex_init(&qspi
->lock
);
650 ctrl
->mode_bits
= SPI_RX_DUAL
| SPI_RX_QUAD
651 | SPI_TX_DUAL
| SPI_TX_QUAD
;
652 ctrl
->setup
= stm32_qspi_setup
;
654 ctrl
->mem_ops
= &stm32_qspi_mem_ops
;
655 ctrl
->num_chipselect
= STM32_QSPI_MAX_NORCHIP
;
656 ctrl
->dev
.of_node
= dev
->of_node
;
658 pm_runtime_set_autosuspend_delay(dev
, STM32_AUTOSUSPEND_DELAY
);
659 pm_runtime_use_autosuspend(dev
);
660 pm_runtime_set_active(dev
);
661 pm_runtime_enable(dev
);
662 pm_runtime_get_noresume(dev
);
664 ret
= devm_spi_register_master(dev
, ctrl
);
666 goto err_pm_runtime_free
;
668 pm_runtime_mark_last_busy(dev
);
669 pm_runtime_put_autosuspend(dev
);
674 pm_runtime_get_sync(qspi
->dev
);
676 writel_relaxed(0, qspi
->io_base
+ QSPI_CR
);
677 mutex_destroy(&qspi
->lock
);
678 pm_runtime_put_noidle(qspi
->dev
);
679 pm_runtime_disable(qspi
->dev
);
680 pm_runtime_set_suspended(qspi
->dev
);
681 pm_runtime_dont_use_autosuspend(qspi
->dev
);
683 stm32_qspi_dma_free(qspi
);
685 clk_disable_unprepare(qspi
->clk
);
687 spi_master_put(qspi
->ctrl
);
692 static int stm32_qspi_remove(struct platform_device
*pdev
)
694 struct stm32_qspi
*qspi
= platform_get_drvdata(pdev
);
696 pm_runtime_get_sync(qspi
->dev
);
698 writel_relaxed(0, qspi
->io_base
+ QSPI_CR
);
699 stm32_qspi_dma_free(qspi
);
700 mutex_destroy(&qspi
->lock
);
701 pm_runtime_put_noidle(qspi
->dev
);
702 pm_runtime_disable(qspi
->dev
);
703 pm_runtime_set_suspended(qspi
->dev
);
704 pm_runtime_dont_use_autosuspend(qspi
->dev
);
705 clk_disable_unprepare(qspi
->clk
);
710 static int __maybe_unused
stm32_qspi_runtime_suspend(struct device
*dev
)
712 struct stm32_qspi
*qspi
= dev_get_drvdata(dev
);
714 clk_disable_unprepare(qspi
->clk
);
719 static int __maybe_unused
stm32_qspi_runtime_resume(struct device
*dev
)
721 struct stm32_qspi
*qspi
= dev_get_drvdata(dev
);
723 return clk_prepare_enable(qspi
->clk
);
726 static int __maybe_unused
stm32_qspi_suspend(struct device
*dev
)
728 pinctrl_pm_select_sleep_state(dev
);
733 static int __maybe_unused
stm32_qspi_resume(struct device
*dev
)
735 struct stm32_qspi
*qspi
= dev_get_drvdata(dev
);
737 pinctrl_pm_select_default_state(dev
);
738 clk_prepare_enable(qspi
->clk
);
740 writel_relaxed(qspi
->cr_reg
, qspi
->io_base
+ QSPI_CR
);
741 writel_relaxed(qspi
->dcr_reg
, qspi
->io_base
+ QSPI_DCR
);
743 pm_runtime_mark_last_busy(qspi
->dev
);
744 pm_runtime_put_autosuspend(qspi
->dev
);
749 static const struct dev_pm_ops stm32_qspi_pm_ops
= {
750 SET_RUNTIME_PM_OPS(stm32_qspi_runtime_suspend
,
751 stm32_qspi_runtime_resume
, NULL
)
752 SET_SYSTEM_SLEEP_PM_OPS(stm32_qspi_suspend
, stm32_qspi_resume
)
755 static const struct of_device_id stm32_qspi_match
[] = {
756 {.compatible
= "st,stm32f469-qspi"},
759 MODULE_DEVICE_TABLE(of
, stm32_qspi_match
);
761 static struct platform_driver stm32_qspi_driver
= {
762 .probe
= stm32_qspi_probe
,
763 .remove
= stm32_qspi_remove
,
765 .name
= "stm32-qspi",
766 .of_match_table
= stm32_qspi_match
,
767 .pm
= &stm32_qspi_pm_ops
,
770 module_platform_driver(stm32_qspi_driver
);
772 MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
773 MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
774 MODULE_LICENSE("GPL v2");