// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

#define QSPI_CR			0x00
#define CR_EN			BIT(0)
#define CR_ABORT		BIT(1)
#define CR_DMAEN		BIT(2)
#define CR_TCEN			BIT(3)
#define CR_SSHIFT		BIT(4)
#define CR_DFM			BIT(6)
#define CR_FSEL			BIT(7)
#define CR_FTHRES_SHIFT		8
#define CR_TEIE			BIT(16)
#define CR_TCIE			BIT(17)
#define CR_FTIE			BIT(18)
#define CR_SMIE			BIT(19)
#define CR_TOIE			BIT(20)
#define CR_PRESC_MASK		GENMASK(31, 24)

#define QSPI_DCR		0x04
#define DCR_FSIZE_MASK		GENMASK(20, 16)

#define QSPI_SR			0x08
#define SR_TEF			BIT(0)
#define SR_TCF			BIT(1)
#define SR_FTF			BIT(2)
#define SR_SMF			BIT(3)
#define SR_TOF			BIT(4)
#define SR_BUSY			BIT(5)
#define SR_FLEVEL_MASK		GENMASK(13, 8)

#define QSPI_FCR		0x0c
#define FCR_CTEF		BIT(0)
#define FCR_CTCF		BIT(1)

#define QSPI_DLR		0x10

#define QSPI_CCR		0x14
#define CCR_INST_MASK		GENMASK(7, 0)
#define CCR_IMODE_MASK		GENMASK(9, 8)
#define CCR_ADMODE_MASK		GENMASK(11, 10)
#define CCR_ADSIZE_MASK		GENMASK(13, 12)
#define CCR_DCYC_MASK		GENMASK(22, 18)
#define CCR_DMODE_MASK		GENMASK(25, 24)
#define CCR_FMODE_MASK		GENMASK(27, 26)
#define CCR_FMODE_INDW		(0U << 26)
#define CCR_FMODE_INDR		(1U << 26)
#define CCR_FMODE_APM		(2U << 26)
#define CCR_FMODE_MM		(3U << 26)
#define CCR_BUSWIDTH_0		0x0
#define CCR_BUSWIDTH_1		0x1
#define CCR_BUSWIDTH_2		0x2
#define CCR_BUSWIDTH_4		0x3

#define QSPI_AR			0x18
#define QSPI_ABR		0x1c
#define QSPI_DR			0x20
#define QSPI_PSMKR		0x24
#define QSPI_PSMAR		0x28
#define QSPI_PIR		0x2c
#define QSPI_LPTR		0x30

#define STM32_QSPI_MAX_MMAP_SZ	SZ_256M
#define STM32_QSPI_MAX_NORCHIP	2

#define STM32_FIFO_TIMEOUT_US	30000
#define STM32_BUSY_TIMEOUT_US	100000
#define STM32_ABT_TIMEOUT_US	100000
#define STM32_COMP_TIMEOUT_MS	1000

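/*
 * struct stm32_qspi_flash - per-chip-select flash state (descriptive
 * comment added here; field meanings follow from their use below)
 * @qspi: pointer back to the owning controller
 * @cs: chip select, 0 for bank 1 or 1 for bank 2 (programmed into CR_FSEL)
 * @presc: clock prescaler derived from the device's max_speed_hz
 */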
struct stm32_qspi_flash {
        struct stm32_qspi *qspi;
        u32 cs;
        u32 presc;
};

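/*
 * struct stm32_qspi - controller state (descriptive comment added here)
 * @io_base: register window ("qspi" resource)
 * @mm_base/@mm_size: memory-mapped flash window ("qspi_mm" resource)
 * @fmode: functional mode latched for the operation in progress
 * @cr_reg/@dcr_reg: shadow copies of CR/DCR, restored on resume
 */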
struct stm32_qspi {
        struct device *dev;
        struct spi_controller *ctrl;
        phys_addr_t phys_base;
        void __iomem *io_base;
        void __iomem *mm_base;
        resource_size_t mm_size;
        struct clk *clk;
        u32 clk_rate;
        struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
        struct completion data_completion;
        u32 fmode;

        struct dma_chan *dma_chtx;
        struct dma_chan *dma_chrx;
        struct completion dma_completion;

        u32 cr_reg;
        u32 dcr_reg;

        /*
         * Protects the device configuration, which may differ between
         * two flash accesses (bank 1 vs bank 2).
         */
        struct mutex lock;
};

static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
{
        struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
        u32 cr, sr;

        sr = readl_relaxed(qspi->io_base + QSPI_SR);

        if (sr & (SR_TEF | SR_TCF)) {
                /* disable irq */
                cr = readl_relaxed(qspi->io_base + QSPI_CR);
                cr &= ~CR_TCIE & ~CR_TEIE;
                writel_relaxed(cr, qspi->io_base + QSPI_CR);
                complete(&qspi->data_completion);
        }

        return IRQ_HANDLED;
}

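/*
 * The data register is deliberately accessed one byte at a time: each
 * 8-bit read or write of QSPI_DR pops or pushes a single FIFO byte,
 * which keeps the transfer length exact for arbitrary nbytes.
 */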
static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
        *val = readb_relaxed(addr);
}

static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
        writeb_relaxed(*val, addr);
}

static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
                              const struct spi_mem_op *op)
{
        void (*tx_fifo)(u8 *val, void __iomem *addr);
        u32 len = op->data.nbytes, sr;
        u8 *buf;
        int ret;

        if (op->data.dir == SPI_MEM_DATA_IN) {
                tx_fifo = stm32_qspi_read_fifo;
                buf = op->data.buf.in;
        } else {
                tx_fifo = stm32_qspi_write_fifo;
                buf = (u8 *)op->data.buf.out;
        }

        while (len--) {
                ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
                                                        sr, (sr & SR_FTF), 1,
                                                        STM32_FIFO_TIMEOUT_US);
                if (ret) {
                        dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
                                len, sr);
                        return ret;
                }
                tx_fifo(buf++, qspi->io_base + QSPI_DR);
        }

        return 0;
}

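/*
 * In memory-mapped mode the controller translates bus reads from the mm
 * window into flash reads on the fly, so the whole transfer reduces to
 * a copy from the mapped region at the requested flash address.
 */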
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
                            const struct spi_mem_op *op)
{
        memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
                      op->data.nbytes);

        return 0;
}

static void stm32_qspi_dma_callback(void *arg)
{
        struct completion *dma_completion = arg;

        complete(dma_completion);
}

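/*
 * DMA transfer flow: map the op buffer to a scatterlist, prepare and
 * submit a slave_sg descriptor, set CR_DMAEN so the peripheral issues
 * DMA requests, then wait for the completion signalled from the DMA
 * callback, with a timeout scaled by the number of segments.
 */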
static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
                             const struct spi_mem_op *op)
{
        struct dma_async_tx_descriptor *desc;
        enum dma_transfer_direction dma_dir;
        struct dma_chan *dma_ch;
        struct sg_table sgt;
        dma_cookie_t cookie;
        u32 cr, t_out;
        int err;

        if (op->data.dir == SPI_MEM_DATA_IN) {
                dma_dir = DMA_DEV_TO_MEM;
                dma_ch = qspi->dma_chrx;
        } else {
                dma_dir = DMA_MEM_TO_DEV;
                dma_ch = qspi->dma_chtx;
        }

        /*
         * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
         * (DMA-able: in vmalloc | kmap | virt_addr_valid).
         */
        err = spi_controller_dma_map_mem_op_data(qspi->ctrl, op, &sgt);
        if (err)
                return err;

        desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
                                       dma_dir, DMA_PREP_INTERRUPT);
        if (!desc) {
                err = -ENOMEM;
                goto out_unmap;
        }

        cr = readl_relaxed(qspi->io_base + QSPI_CR);

        reinit_completion(&qspi->dma_completion);
        desc->callback = stm32_qspi_dma_callback;
        desc->callback_param = &qspi->dma_completion;
        cookie = dmaengine_submit(desc);
        err = dma_submit_error(cookie);
        if (err)
                goto out;

        dma_async_issue_pending(dma_ch);

        writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);

        t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
        if (!wait_for_completion_timeout(&qspi->dma_completion,
                                         msecs_to_jiffies(t_out)))
                err = -ETIMEDOUT;

        if (err)
                dmaengine_terminate_all(dma_ch);

out:
        writel_relaxed(cr & ~CR_DMAEN, qspi->io_base + QSPI_CR);
out_unmap:
        spi_controller_dma_unmap_mem_op_data(qspi->ctrl, op, &sgt);

        return err;
}

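/*
 * Transfer dispatch: memory-mapped reads bypass the FIFO entirely; when
 * a DMA channel exists for the transfer direction, DMA is tried first,
 * and byte-wise FIFO polling remains the fallback path.
 */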
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
        if (!op->data.nbytes)
                return 0;

        if (qspi->fmode == CCR_FMODE_MM)
                return stm32_qspi_tx_mm(qspi, op);
        else if ((op->data.dir == SPI_MEM_DATA_IN && qspi->dma_chrx) ||
                 (op->data.dir == SPI_MEM_DATA_OUT && qspi->dma_chtx))
                if (!stm32_qspi_tx_dma(qspi, op))
                        return 0;

        return stm32_qspi_tx_poll(qspi, op);
}

static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
        u32 sr;

        return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
                                                 !(sr & SR_BUSY), 1,
                                                 STM32_BUSY_TIMEOUT_US);
}

static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
                               const struct spi_mem_op *op)
{
        u32 cr, sr;
        int err = 0;

        if (!op->data.nbytes)
                return stm32_qspi_wait_nobusy(qspi);

        if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
                goto out;

        reinit_completion(&qspi->data_completion);
        cr = readl_relaxed(qspi->io_base + QSPI_CR);
        writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);

        if (!wait_for_completion_timeout(&qspi->data_completion,
                                msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
                err = -ETIMEDOUT;
        } else {
                sr = readl_relaxed(qspi->io_base + QSPI_SR);
                if (sr & SR_TEF)
                        err = -EIO;
        }

out:
        /* clear flags */
        writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);

        return err;
}

static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
{
        if (buswidth == 4)
                return CCR_BUSWIDTH_4;

        return buswidth;
}

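/*
 * The CCR width encoding above happens to match the buswidth for zero,
 * one and two lines (0x0 none, 0x1 single, 0x2 dual); only quad differs
 * (buswidth 4 -> CCR_BUSWIDTH_4 == 0x3), hence the single special case.
 */
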
static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
        struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
        u32 ccr, cr, addr_max;
        int timeout, err = 0;

        dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
                op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
                op->dummy.buswidth, op->data.buswidth,
                op->addr.val, op->data.nbytes);

        err = stm32_qspi_wait_nobusy(qspi);
        if (err)
                goto abort;

        addr_max = op->addr.val + op->data.nbytes + 1;

        if (op->data.dir == SPI_MEM_DATA_IN) {
                if (addr_max < qspi->mm_size &&
                    op->addr.buswidth)
                        qspi->fmode = CCR_FMODE_MM;
                else
                        qspi->fmode = CCR_FMODE_INDR;
        } else {
                qspi->fmode = CCR_FMODE_INDW;
        }

        cr = readl_relaxed(qspi->io_base + QSPI_CR);
        cr &= ~CR_PRESC_MASK & ~CR_FSEL;
        cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
        cr |= FIELD_PREP(CR_FSEL, flash->cs);
        writel_relaxed(cr, qspi->io_base + QSPI_CR);

        if (op->data.nbytes)
                writel_relaxed(op->data.nbytes - 1,
                               qspi->io_base + QSPI_DLR);
        else
                qspi->fmode = CCR_FMODE_INDW;

        ccr = qspi->fmode;
        ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
        ccr |= FIELD_PREP(CCR_IMODE_MASK,
                          stm32_qspi_get_mode(qspi, op->cmd.buswidth));

        if (op->addr.nbytes) {
                ccr |= FIELD_PREP(CCR_ADMODE_MASK,
                                  stm32_qspi_get_mode(qspi, op->addr.buswidth));
                ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
        }

        if (op->dummy.buswidth && op->dummy.nbytes)
                ccr |= FIELD_PREP(CCR_DCYC_MASK,
                                  op->dummy.nbytes * 8 / op->dummy.buswidth);

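        /*
         * The DCYC field just set counts clock cycles, not bytes: for
         * instance one dummy byte clocked on a single line takes 8
         * cycles, while the same byte on four lines takes only 2.
         */
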
        if (op->data.nbytes) {
                ccr |= FIELD_PREP(CCR_DMODE_MASK,
                                  stm32_qspi_get_mode(qspi, op->data.buswidth));
        }

        writel_relaxed(ccr, qspi->io_base + QSPI_CCR);

        if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
                writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);

        err = stm32_qspi_tx(qspi, op);

        /*
         * Abort in:
         * - the error case;
         * - memory-mapped read: prefetching must be stopped if we read
         *   the last byte of the device (device size - fifo size). Since
         *   the device size is not known, prefetching is always stopped.
         */
        if (err || qspi->fmode == CCR_FMODE_MM)
                goto abort;

        /* wait for end of tx in indirect mode */
        err = stm32_qspi_wait_cmd(qspi, op);
        if (err)
                goto abort;

        return 0;

abort:
        cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
        writel_relaxed(cr, qspi->io_base + QSPI_CR);

        /* wait for the abort bit to be cleared by hardware */
        timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
                                                    cr, !(cr & CR_ABORT), 1,
                                                    STM32_ABT_TIMEOUT_US);

        writel_relaxed(FCR_CTCF, qspi->io_base + QSPI_FCR);

        if (err || timeout)
                dev_err(qspi->dev, "%s err:%d abort timeout:%d\n",
                        __func__, err, timeout);

        return err;
}

static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
        int ret;

        mutex_lock(&qspi->lock);
        ret = stm32_qspi_send(mem, op);
        mutex_unlock(&qspi->lock);

        return ret;
}

static int stm32_qspi_setup(struct spi_device *spi)
{
        struct spi_controller *ctrl = spi->master;
        struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
        struct stm32_qspi_flash *flash;
        u32 presc;

        if (ctrl->busy)
                return -EBUSY;

        if (!spi->max_speed_hz)
                return -EINVAL;

        presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
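        /*
         * The QSPI clock is the kernel clock divided by (presc + 1).
         * For instance, assuming a 200 MHz kernel clock and a device
         * limited to 108 MHz: presc = DIV_ROUND_UP(200M, 108M) - 1 = 1,
         * giving a 100 MHz SCK (rounded down, never above the limit).
         */
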
        flash = &qspi->flash[spi->chip_select];
        flash->qspi = qspi;
        flash->cs = spi->chip_select;
        flash->presc = presc;

        mutex_lock(&qspi->lock);
        qspi->cr_reg = 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
        writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);

        /* set dcr fsize to max address */
        qspi->dcr_reg = DCR_FSIZE_MASK;
        writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);
        mutex_unlock(&qspi->lock);

        return 0;
}

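/*
 * DMA is optional: if a channel is missing or cannot be configured, the
 * driver silently falls back to FIFO polling; only -EPROBE_DEFER from
 * the channel request is propagated to the caller.
 */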
static int stm32_qspi_dma_setup(struct stm32_qspi *qspi)
{
        struct dma_slave_config dma_cfg;
        struct device *dev = qspi->dev;
        int ret = 0;

        memset(&dma_cfg, 0, sizeof(dma_cfg));

        dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        dma_cfg.src_addr = qspi->phys_base + QSPI_DR;
        dma_cfg.dst_addr = qspi->phys_base + QSPI_DR;
        dma_cfg.src_maxburst = 4;
        dma_cfg.dst_maxburst = 4;

        qspi->dma_chrx = dma_request_chan(dev, "rx");
        if (IS_ERR(qspi->dma_chrx)) {
                ret = PTR_ERR(qspi->dma_chrx);
                qspi->dma_chrx = NULL;
                if (ret == -EPROBE_DEFER)
                        goto out;
        } else if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) {
                dev_err(dev, "dma rx config failed\n");
                dma_release_channel(qspi->dma_chrx);
                qspi->dma_chrx = NULL;
        }

        qspi->dma_chtx = dma_request_chan(dev, "tx");
        if (IS_ERR(qspi->dma_chtx)) {
                ret = PTR_ERR(qspi->dma_chtx);
                qspi->dma_chtx = NULL;
        } else if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) {
                dev_err(dev, "dma tx config failed\n");
                dma_release_channel(qspi->dma_chtx);
                qspi->dma_chtx = NULL;
        }

out:
        init_completion(&qspi->dma_completion);

        if (ret != -EPROBE_DEFER)
                ret = 0;

        return ret;
}

static void stm32_qspi_dma_free(struct stm32_qspi *qspi)
{
        if (qspi->dma_chtx)
                dma_release_channel(qspi->dma_chtx);
        if (qspi->dma_chrx)
                dma_release_channel(qspi->dma_chrx);
}

/*
 * No special host constraints, so use the default
 * spi_mem_default_supports_op() to check the supported operations.
 */
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
        .exec_op = stm32_qspi_exec_op,
};

static void stm32_qspi_release(struct stm32_qspi *qspi)
{
        /* disable qspi */
        writel_relaxed(0, qspi->io_base + QSPI_CR);
        stm32_qspi_dma_free(qspi);
        mutex_destroy(&qspi->lock);
        clk_disable_unprepare(qspi->clk);
}

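/*
 * Probe acquires resources in order: registers ("qspi"), memory-mapped
 * window ("qspi_mm"), interrupt, kernel clock, reset line and, last,
 * the optional DMA channels; once the clock is running, failures unwind
 * through stm32_qspi_release().
 */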
static int stm32_qspi_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct spi_controller *ctrl;
        struct reset_control *rstc;
        struct stm32_qspi *qspi;
        struct resource *res;
        int ret, irq;

        ctrl = spi_alloc_master(dev, sizeof(*qspi));
        if (!ctrl)
                return -ENOMEM;

        qspi = spi_controller_get_devdata(ctrl);
        qspi->ctrl = ctrl;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
        qspi->io_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(qspi->io_base)) {
                ret = PTR_ERR(qspi->io_base);
                goto err_master_put;
        }

        qspi->phys_base = res->start;

        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
        qspi->mm_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(qspi->mm_base)) {
                ret = PTR_ERR(qspi->mm_base);
                goto err_master_put;
        }

        qspi->mm_size = resource_size(res);
        if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
                ret = -EINVAL;
                goto err_master_put;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto err_master_put;
        }

        ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
                               dev_name(dev), qspi);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                goto err_master_put;
        }

        init_completion(&qspi->data_completion);

        qspi->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(qspi->clk)) {
                ret = PTR_ERR(qspi->clk);
                goto err_master_put;
        }

        qspi->clk_rate = clk_get_rate(qspi->clk);
        if (!qspi->clk_rate) {
                ret = -EINVAL;
                goto err_master_put;
        }

        ret = clk_prepare_enable(qspi->clk);
        if (ret) {
                dev_err(dev, "can not enable the clock\n");
                goto err_master_put;
        }

        rstc = devm_reset_control_get_exclusive(dev, NULL);
        if (IS_ERR(rstc)) {
                ret = PTR_ERR(rstc);
                if (ret == -EPROBE_DEFER)
                        goto err_qspi_release;
        } else {
                reset_control_assert(rstc);
                udelay(2);
                reset_control_deassert(rstc);
        }

        qspi->dev = dev;
        platform_set_drvdata(pdev, qspi);
        ret = stm32_qspi_dma_setup(qspi);
        if (ret)
                goto err_qspi_release;

        mutex_init(&qspi->lock);

        ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
                          | SPI_TX_DUAL | SPI_TX_QUAD;
        ctrl->setup = stm32_qspi_setup;
        ctrl->bus_num = -1;
        ctrl->mem_ops = &stm32_qspi_mem_ops;
        ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
        ctrl->dev.of_node = dev->of_node;

        ret = devm_spi_register_master(dev, ctrl);
        if (!ret)
                return 0;

err_qspi_release:
        stm32_qspi_release(qspi);
err_master_put:
        spi_master_put(qspi->ctrl);

        return ret;
}

static int stm32_qspi_remove(struct platform_device *pdev)
{
        struct stm32_qspi *qspi = platform_get_drvdata(pdev);

        stm32_qspi_release(qspi);

        return 0;
}

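/*
 * System PM: suspend only gates the clock and switches pinctrl to the
 * sleep state; resume rewrites CR and DCR from the shadow copies cached
 * in stm32_qspi_setup(), in case the controller lost register state.
 */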
static int __maybe_unused stm32_qspi_suspend(struct device *dev)
{
        struct stm32_qspi *qspi = dev_get_drvdata(dev);

        clk_disable_unprepare(qspi->clk);
        pinctrl_pm_select_sleep_state(dev);

        return 0;
}

static int __maybe_unused stm32_qspi_resume(struct device *dev)
{
        struct stm32_qspi *qspi = dev_get_drvdata(dev);

        pinctrl_pm_select_default_state(dev);
        clk_prepare_enable(qspi->clk);

        writel_relaxed(qspi->cr_reg, qspi->io_base + QSPI_CR);
        writel_relaxed(qspi->dcr_reg, qspi->io_base + QSPI_DCR);

        return 0;
}

static SIMPLE_DEV_PM_OPS(stm32_qspi_pm_ops, stm32_qspi_suspend, stm32_qspi_resume);

static const struct of_device_id stm32_qspi_match[] = {
        {.compatible = "st,stm32f469-qspi"},
        {}
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);

static struct platform_driver stm32_qspi_driver = {
        .probe  = stm32_qspi_probe,
        .remove = stm32_qspi_remove,
        .driver = {
                .name = "stm32-qspi",
                .of_match_table = stm32_qspi_match,
                .pm = &stm32_qspi_pm_ops,
        },
};
module_platform_driver(stm32_qspi_driver);

708 MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
709 MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
710 MODULE_LICENSE("GPL v2");