// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
 * Author: Sourav Poddar <sourav.poddar@ti.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/sizes.h>

#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

struct ti_qspi_regs {
	u32 clkctrl;
};

struct ti_qspi {
	struct completion	transfer_complete;

	/* list synchronization */
	struct mutex		list_lock;

	struct spi_controller	*host;
	void __iomem		*base;
	void __iomem		*mmap_base;
	size_t			mmap_size;
	struct regmap		*ctrl_base;
	unsigned int		ctrl_reg;
	struct clk		*fclk;
	struct device		*dev;

	struct ti_qspi_regs	ctx_reg;

	dma_addr_t		mmap_phys_base;
	dma_addr_t		rx_bb_dma_addr;
	void			*rx_bb_addr;
	struct dma_chan		*rx_chan;

	u32 cmd;
	u32 dc;

	bool mmap_enabled;
	int current_cs;
};
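
/*
 * Note on structure: the driver below implements two read paths, which the
 * fields above reflect. Programmed I/O goes through the QSPI_SPI_DATA_REG*
 * window; memory-mapped ("MM") mode exposes the flash contents in the
 * qspi_mmap region, fetched either with memcpy_fromio() or, when a
 * memcpy-capable dmaengine channel is available, with DMA via a bounce
 * buffer or a scatter-gather list.
 */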

#define QSPI_PID			(0x0)
#define QSPI_SYSCONFIG			(0x10)
#define QSPI_SPI_CLOCK_CNTRL_REG	(0x40)
#define QSPI_SPI_DC_REG			(0x44)
#define QSPI_SPI_CMD_REG		(0x48)
#define QSPI_SPI_STATUS_REG		(0x4c)
#define QSPI_SPI_DATA_REG		(0x50)
#define QSPI_SPI_SETUP_REG(n)		((0x54 + 4 * n))
#define QSPI_SPI_SWITCH_REG		(0x64)
#define QSPI_SPI_DATA_REG_1		(0x68)
#define QSPI_SPI_DATA_REG_2		(0x6c)
#define QSPI_SPI_DATA_REG_3		(0x70)

#define QSPI_COMPLETION_TIMEOUT		msecs_to_jiffies(2000)

/* Clock Control */
#define QSPI_CLK_EN			(1 << 31)
#define QSPI_CLK_DIV_MAX		0xffff

/* Command */
#define QSPI_EN_CS(n)			(n << 28)
#define QSPI_WLEN(n)			((n - 1) << 19)
#define QSPI_3_PIN			(1 << 18)
#define QSPI_RD_SNGL			(1 << 16)
#define QSPI_WR_SNGL			(2 << 16)
#define QSPI_RD_DUAL			(3 << 16)
#define QSPI_RD_QUAD			(7 << 16)
#define QSPI_INVAL			(4 << 16)
#define QSPI_FLEN(n)			((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS		128
#define QSPI_WLEN_MAX_BYTES		16
#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)

/* Status register */
#define BUSY				0x01
#define WC				0x02

/* Device Control */
#define QSPI_DD(m, n)			(m << (3 + n * 8))
#define QSPI_CKPHA(n)			(1 << (2 + n * 8))
#define QSPI_CSPOL(n)			(1 << (1 + n * 8))
#define QSPI_CKPOL(n)			(1 << (n * 8))

#define QSPI_FRAME			4096

#define QSPI_AUTOSUSPEND_TIMEOUT	2000

#define MEM_CS_EN(n)			((n + 1) << 8)
#define MEM_CS_MASK			(7 << 8)

#define MM_SWITCH			0x1

#define QSPI_SETUP_RD_NORMAL		(0x0 << 12)
#define QSPI_SETUP_RD_DUAL		(0x1 << 12)
#define QSPI_SETUP_RD_QUAD		(0x3 << 12)
#define QSPI_SETUP_ADDR_SHIFT		8
#define QSPI_SETUP_DUMMY_SHIFT		10
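
/*
 * Layout of QSPI_SPI_SETUP_REG(n) as implied by the fields above: the
 * read mode sits in bits [13:12], the dummy byte count at bit 10, the
 * address width (in bytes, minus one) at bit 8, and the read opcode in
 * the low byte.
 */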

#define QSPI_DMA_BUFFER_SIZE		SZ_64K

static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
		unsigned long reg)
{
	return readl(qspi->base + reg);
}

static inline void ti_qspi_write(struct ti_qspi *qspi,
		unsigned long val, unsigned long reg)
{
	writel(val, qspi->base + reg);
}

static int ti_qspi_setup(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
	int ret;

	if (spi->controller->busy) {
		dev_dbg(qspi->dev, "host busy doing other transfers\n");
		return -EBUSY;
	}

	if (!qspi->host->max_speed_hz) {
		dev_err(qspi->dev, "spi max frequency not defined\n");
		return -EINVAL;
	}

	spi->max_speed_hz = min(spi->max_speed_hz, qspi->host->max_speed_hz);

	ret = pm_runtime_resume_and_get(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	pm_runtime_mark_last_busy(qspi->dev);
	ret = pm_runtime_put_autosuspend(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
		return ret;
	}

	return 0;
}

static void ti_qspi_setup_clk(struct ti_qspi *qspi, u32 speed_hz)
{
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
	int clk_div;
	u32 clk_ctrl_reg, clk_rate, clk_ctrl_new;

	clk_rate = clk_get_rate(qspi->fclk);
	clk_div = DIV_ROUND_UP(clk_rate, speed_hz) - 1;
	clk_div = clamp(clk_div, 0, QSPI_CLK_DIV_MAX);
	dev_dbg(qspi->dev, "hz: %d, clock divider %d\n", speed_hz, clk_div);

	pm_runtime_resume_and_get(qspi->dev);

	clk_ctrl_new = QSPI_CLK_EN | clk_div;
	if (ctx_reg->clkctrl != clk_ctrl_new) {
		clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);

		clk_ctrl_reg &= ~QSPI_CLK_EN;

		/* disable SCLK */
		ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);

		/* enable SCLK with the new divider */
		ti_qspi_write(qspi, clk_ctrl_new, QSPI_SPI_CLOCK_CNTRL_REG);
		ctx_reg->clkctrl = clk_ctrl_new;
	}

	pm_runtime_mark_last_busy(qspi->dev);
	pm_runtime_put_autosuspend(qspi->dev);
}

static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
{
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;

	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
}

static inline u32 qspi_is_busy(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	while ((stat & BUSY) && time_after(timeout, jiffies)) {
		cpu_relax();
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	}

	WARN(stat & BUSY, "qspi busy\n");
	return stat & BUSY;
}

static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	do {
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
		if (stat & WC)
			return 0;
		cpu_relax();
	} while (time_after(timeout, jiffies));

	/* One final check after the timeout window closes. */
	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	if (stat & WC)
		return 0;
	return -ETIMEDOUT;
}
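
/*
 * PIO transmit path. For byte-wide transfers of at least 16 bytes the
 * controller's four data registers are packed big-endian (DATA_REG_3
 * down to DATA_REG), so a single 128-bit word is shifted out per
 * command word instead of one byte at a time.
 */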
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			  int count)
{
	int wlen, xfer_len;
	unsigned int cmd;
	const u8 *txbuf;
	u32 data;

	txbuf = t->tx_buf;
	cmd = qspi->cmd | QSPI_WR_SNGL;
	wlen = t->bits_per_word >> 3;	/* in bytes */
	xfer_len = wlen;

	while (count) {
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
				cmd, qspi->dc, *txbuf);
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *txp = (u32 *)txbuf;

				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_3);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_2);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_1);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG);
				xfer_len = QSPI_WLEN_MAX_BYTES;
				cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
			} else {
				writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
				cmd = qspi->cmd | QSPI_WR_SNGL;
				xfer_len = wlen;
				cmd |= QSPI_WLEN(wlen);
			}
			break;
		case 2:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
				cmd, qspi->dc, *txbuf);
			writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
				cmd, qspi->dc, *txbuf);
			writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		}

		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "write timed out\n");
			return -ETIMEDOUT;
		}
		txbuf += xfer_len;
		count -= xfer_len;
	}

	return 0;
}

static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			 int count)
{
	int wlen;
	unsigned int cmd;
	u32 rx;
	u8 rxlen, rx_wlen;
	u8 *rxbuf;

	rxbuf = t->rx_buf;
	cmd = qspi->cmd;
	switch (t->rx_nbits) {
	case SPI_NBITS_DUAL:
		cmd |= QSPI_RD_DUAL;
		break;
	case SPI_NBITS_QUAD:
		cmd |= QSPI_RD_QUAD;
		break;
	default:
		cmd |= QSPI_RD_SNGL;
		break;
	}
	wlen = t->bits_per_word >> 3;	/* in bytes */
	rx_wlen = wlen;

	while (count) {
		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			/*
			 * Optimize the 8-bit word transfers, as used by
			 * the SPI flash devices.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				rxlen = QSPI_WLEN_MAX_BYTES;
			} else {
				rxlen = min(count, 4);
			}
			rx_wlen = rxlen << 3;
			cmd &= ~QSPI_WLEN_MASK;
			cmd |= QSPI_WLEN(rx_wlen);
			break;
		default:
			rxlen = wlen;
			break;
		}

		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "read timed out\n");
			return -ETIMEDOUT;
		}

		switch (wlen) {
		case 1:
			/*
			 * Optimize the 8-bit word transfers, as used by
			 * the SPI flash devices.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *rxp = (u32 *)rxbuf;

				rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				*rxp++ = be32_to_cpu(rx);
			} else {
				u8 *rxp = rxbuf;

				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				if (rx_wlen >= 8)
					*rxp++ = rx >> (rx_wlen - 8);
				if (rx_wlen >= 16)
					*rxp++ = rx >> (rx_wlen - 16);
				if (rx_wlen >= 24)
					*rxp++ = rx >> (rx_wlen - 24);
				if (rx_wlen >= 32)
					*rxp++ = rx;
			}
			break;
		case 2:
			*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
			break;
		}
		rxbuf += rxlen;
		count -= rxlen;
	}

	return 0;
}

static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			     int count)
{
	int ret;

	if (t->tx_buf) {
		ret = qspi_write_msg(qspi, t, count);
		if (ret) {
			dev_dbg(qspi->dev, "Error while writing\n");
			return ret;
		}
	}

	if (t->rx_buf) {
		ret = qspi_read_msg(qspi, t, count);
		if (ret) {
			dev_dbg(qspi->dev, "Error while reading\n");
			return ret;
		}
	}

	return 0;
}
static void ti_qspi_dma_callback(void *param)
{
	struct ti_qspi *qspi = param;

	complete(&qspi->transfer_complete);
}

static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
			    dma_addr_t dma_src, size_t len)
{
	struct dma_chan *chan = qspi->rx_chan;
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_async_tx_descriptor *tx;
	int ret;
	unsigned long time_left;

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
		return -EIO;
	}

	tx->callback = ti_qspi_dma_callback;
	tx->callback_param = qspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&qspi->transfer_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	time_left = wait_for_completion_timeout(&qspi->transfer_complete,
						msecs_to_jiffies(len));
	if (time_left == 0) {
		dmaengine_terminate_sync(chan);
		dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
				     void *to, size_t readsize)
{
	dma_addr_t dma_src = qspi->mmap_phys_base + offs;
	int ret = 0;

	/*
	 * Use a bounce buffer, as filesystems such as jffs2 and ubifs
	 * may pass buffers that do not belong to the kernel lowmem region.
	 */
	while (readsize != 0) {
		size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
					readsize);

		ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
				       dma_src, xfer_len);
		if (ret != 0)
			return ret;
		memcpy(to, qspi->rx_bb_addr, xfer_len);
		readsize -= xfer_len;
		dma_src += xfer_len;
		to += xfer_len;
	}

	return ret;
}

static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
			       loff_t from)
{
	struct scatterlist *sg;
	dma_addr_t dma_src = qspi->mmap_phys_base + from;
	dma_addr_t dma_dst;
	int i, len, ret;

	for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
		dma_dst = sg_dma_address(sg);
		len = sg_dma_len(sg);
		ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
		if (ret)
			return ret;
		dma_src += len;
	}

	return 0;
}
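
/*
 * Memory-mapped mode: writing MM_SWITCH flips the controller into the
 * mapped window, and on SoCs that expose a control-module register via
 * syscon the MEM_CS field routes the window to the selected chip select.
 */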
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);

	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base) {
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK,
				   MEM_CS_EN(spi_get_chipselect(spi, 0)));
	}
	qspi->mmap_enabled = true;
	qspi->current_cs = spi_get_chipselect(spi, 0);
}

static void ti_qspi_disable_memory_map(struct spi_device *spi)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);

	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base)
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK, 0);
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;
}

static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
				    u8 data_nbits, u8 addr_width,
				    u8 dummy_bytes)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(spi->controller);
	u32 memval = opcode;

	switch (data_nbits) {
	case SPI_NBITS_QUAD:
		memval |= QSPI_SETUP_RD_QUAD;
		break;
	case SPI_NBITS_DUAL:
		memval |= QSPI_SETUP_RD_DUAL;
		break;
	default:
		memval |= QSPI_SETUP_RD_NORMAL;
		break;
	}
	memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
		   dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
	ti_qspi_write(qspi, memval,
		      QSPI_SPI_SETUP_REG(spi_get_chipselect(spi, 0)));
}
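
/*
 * spi-mem callbacks: adjust_op_size() clamps a read either to the end of
 * the memory-mapped window or, in the software fallback, to one QSPI
 * frame minus the opcode, address and dummy bytes; exec_op() then serves
 * the read from the mapped window.
 */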
static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
	size_t max_len;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (op->addr.val < qspi->mmap_size) {
			/* Limit MMIO to the mmaped region */
			if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
				max_len = qspi->mmap_size - op->addr.val;
				op->data.nbytes = min((size_t) op->data.nbytes,
						      max_len);
			}
		} else {
			/*
			 * Use fallback mode (SW generated transfers) above the
			 * mmap border.
			 * Adjust size to comply with the QSPI max frame length.
			 */
			max_len = QSPI_FRAME;
			max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
			op->data.nbytes = min((size_t) op->data.nbytes,
					      max_len);
		}
	}

	return 0;
}

static int ti_qspi_exec_mem_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->controller);
	u32 from = 0;
	int ret = 0;

	/* Only optimize the read path. */
	if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
	    !op->addr.nbytes || op->addr.nbytes > 4)
		return -EOPNOTSUPP;

	/* Address exceeds MMIO window size, fall back to regular mode. */
	from = op->addr.val;
	if (from + op->data.nbytes > qspi->mmap_size)
		return -EOPNOTSUPP;

	mutex_lock(&qspi->list_lock);

	if (!qspi->mmap_enabled || qspi->current_cs != spi_get_chipselect(mem->spi, 0)) {
		ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz);
		ti_qspi_enable_memory_map(mem->spi);
	}
	ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
				op->addr.nbytes, op->dummy.nbytes);

	if (qspi->rx_chan) {
		struct sg_table sgt;

		if (virt_addr_valid(op->data.buf.in) &&
		    !spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
							&sgt)) {
			ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
			spi_controller_dma_unmap_mem_op_data(mem->spi->controller,
							     op, &sgt);
		} else {
			ret = ti_qspi_dma_bounce_buffer(qspi, from,
							op->data.buf.in,
							op->data.nbytes);
		}
	} else {
		memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
			      op->data.nbytes);
	}

	mutex_unlock(&qspi->list_lock);

	return ret;
}

static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
	.exec_op = ti_qspi_exec_mem_op,
	.adjust_op_size = ti_qspi_adjust_op_size,
};
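
/*
 * Message pump for regular (non spi-mem) transfers: the frame length
 * programmed into the command register is the total word count across
 * all transfers in the message, capped at the QSPI_FRAME maximum of
 * 4096 words.
 */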
static int ti_qspi_start_transfer_one(struct spi_controller *host,
				      struct spi_message *m)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(host);
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	int status = 0, ret;
	unsigned int frame_len_words, transfer_len_words;
	int wlen;

	/* setup device control reg */
	qspi->dc = 0;

	if (spi->mode & SPI_CPHA)
		qspi->dc |= QSPI_CKPHA(spi_get_chipselect(spi, 0));
	if (spi->mode & SPI_CPOL)
		qspi->dc |= QSPI_CKPOL(spi_get_chipselect(spi, 0));
	if (spi->mode & SPI_CS_HIGH)
		qspi->dc |= QSPI_CSPOL(spi_get_chipselect(spi, 0));

	frame_len_words = 0;
	list_for_each_entry(t, &m->transfers, transfer_list)
		frame_len_words += t->len / (t->bits_per_word >> 3);
	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);

	/* setup command reg */
	qspi->cmd = 0;
	qspi->cmd |= QSPI_EN_CS(spi_get_chipselect(spi, 0));
	qspi->cmd |= QSPI_FLEN(frame_len_words);

	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);

	mutex_lock(&qspi->list_lock);

	if (qspi->mmap_enabled)
		ti_qspi_disable_memory_map(spi);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
			     QSPI_WLEN(t->bits_per_word));

		wlen = t->bits_per_word >> 3;
		transfer_len_words = min(t->len / wlen, frame_len_words);

		ti_qspi_setup_clk(qspi, t->speed_hz);
		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
		if (ret) {
			dev_dbg(qspi->dev, "transfer message failed\n");
			mutex_unlock(&qspi->list_lock);
			return -EINVAL;
		}

		m->actual_length += transfer_len_words * wlen;
		frame_len_words -= transfer_len_words;
		if (frame_len_words == 0)
			break;
	}

	mutex_unlock(&qspi->list_lock);

	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);

	m->status = status;
	spi_finalize_current_message(host);

	return status;
}

static int ti_qspi_runtime_resume(struct device *dev)
{
	struct ti_qspi *qspi;

	qspi = dev_get_drvdata(dev);
	ti_qspi_restore_ctx(qspi);

	return 0;
}

static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
{
	if (qspi->rx_bb_addr)
		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
				  qspi->rx_bb_addr,
				  qspi->rx_bb_dma_addr);

	if (qspi->rx_chan)
		dma_release_channel(qspi->rx_chan);
}

static const struct of_device_id ti_qspi_match[] = {
	{.compatible = "ti,dra7xxx-qspi" },
	{.compatible = "ti,am4372-qspi" },
	{},
};
MODULE_DEVICE_TABLE(of, ti_qspi_match);
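
/*
 * A minimal sketch of a matching devicetree node, based only on the
 * resources and properties the probe below reads. The addresses, syscon
 * offset and clock label here are illustrative assumptions, not
 * authoritative; consult the dt-bindings for real values:
 *
 *	qspi: spi@4b300000 {
 *		compatible = "ti,dra7xxx-qspi";
 *		reg = <0x4b300000 0x100>, <0x5c000000 0x4000000>;
 *		reg-names = "qspi_base", "qspi_mmap";
 *		syscon-chipselects = <&scm_conf 0x558>;
 *		clocks = <&qspi_gfclk_div>;
 *		clock-names = "fck";
 *		num-cs = <4>;
 *		spi-max-frequency = <48000000>;
 *	};
 */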
static int ti_qspi_probe(struct platform_device *pdev)
{
	struct ti_qspi *qspi;
	struct spi_controller *host;
	struct resource *r, *res_mmap;
	struct device_node *np = pdev->dev.of_node;
	u32 max_freq;
	int ret = 0, num_cs, irq;
	dma_cap_mask_t mask;

	host = spi_alloc_host(&pdev->dev, sizeof(*qspi));
	if (!host)
		return -ENOMEM;

	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;

	host->flags = SPI_CONTROLLER_HALF_DUPLEX;
	host->setup = ti_qspi_setup;
	host->auto_runtime_pm = true;
	host->transfer_one_message = ti_qspi_start_transfer_one;
	host->dev.of_node = pdev->dev.of_node;
	host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
				   SPI_BPW_MASK(8);
	host->mem_ops = &ti_qspi_mem_ops;

	if (!of_property_read_u32(np, "num-cs", &num_cs))
		host->num_chipselect = num_cs;

	qspi = spi_controller_get_devdata(host);
	qspi->host = host;
	qspi->dev = &pdev->dev;
	platform_set_drvdata(pdev, qspi);

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
	if (r == NULL) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (r == NULL) {
			dev_err(&pdev->dev, "missing platform data\n");
			ret = -ENODEV;
			goto free_host;
		}
	}

	res_mmap = platform_get_resource_byname(pdev,
						IORESOURCE_MEM, "qspi_mmap");
	if (res_mmap == NULL) {
		res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res_mmap == NULL)
			dev_dbg(&pdev->dev,
				"memory mapped resource not required\n");
	}

	if (res_mmap)
		qspi->mmap_size = resource_size(res_mmap);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto free_host;
	}

	mutex_init(&qspi->list_lock);

	qspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(qspi->base)) {
		ret = PTR_ERR(qspi->base);
		goto free_host;
	}

	if (of_property_present(np, "syscon-chipselects")) {
		qspi->ctrl_base =
			syscon_regmap_lookup_by_phandle(np,
							"syscon-chipselects");
		if (IS_ERR(qspi->ctrl_base)) {
			ret = PTR_ERR(qspi->ctrl_base);
			goto free_host;
		}
		ret = of_property_read_u32_index(np,
						 "syscon-chipselects",
						 1, &qspi->ctrl_reg);
		if (ret) {
			dev_err(&pdev->dev,
				"couldn't get ctrl_mod reg index\n");
			goto free_host;
		}
	}

	qspi->fclk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(qspi->fclk)) {
		ret = PTR_ERR(qspi->fclk);
		dev_err(&pdev->dev, "could not get clk: %d\n", ret);
	}

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
		host->max_speed_hz = max_freq;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	qspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(qspi->rx_chan)) {
		dev_err(qspi->dev,
			"No Rx DMA available, trying mmap mode\n");
		qspi->rx_chan = NULL;
		ret = 0;
		goto no_dma;
	}
	qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
					      QSPI_DMA_BUFFER_SIZE,
					      &qspi->rx_bb_dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!qspi->rx_bb_addr) {
		dev_err(qspi->dev,
			"dma_alloc_coherent failed, using PIO mode\n");
		dma_release_channel(qspi->rx_chan);
		goto no_dma;
	}
	host->dma_rx = qspi->rx_chan;
	init_completion(&qspi->transfer_complete);
	if (res_mmap)
		qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;

no_dma:
	if (!qspi->rx_chan && res_mmap) {
		qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
		if (IS_ERR(qspi->mmap_base)) {
			dev_info(&pdev->dev,
				 "mmap failed with error %ld using PIO mode\n",
				 PTR_ERR(qspi->mmap_base));
			qspi->mmap_base = NULL;
			host->mem_ops = NULL;
		}
	}
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;

	ret = devm_spi_register_controller(&pdev->dev, host);
	if (!ret)
		return 0;

	ti_qspi_dma_cleanup(qspi);

	pm_runtime_disable(&pdev->dev);
free_host:
	spi_controller_put(host);
	return ret;
}

static void ti_qspi_remove(struct platform_device *pdev)
{
	struct ti_qspi *qspi = platform_get_drvdata(pdev);
	int rc;

	rc = spi_controller_suspend(qspi->host);
	if (rc) {
		dev_alert(&pdev->dev, "spi_controller_suspend() failed (%pe)\n",
			  ERR_PTR(rc));
		return;
	}

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	ti_qspi_dma_cleanup(qspi);
}

static const struct dev_pm_ops ti_qspi_pm_ops = {
	.runtime_resume = ti_qspi_runtime_resume,
};

static struct platform_driver ti_qspi_driver = {
	.probe	= ti_qspi_probe,
	.remove = ti_qspi_remove,
	.driver = {
		.name	= "ti-qspi",
		.pm =	&ti_qspi_pm_ops,
		.of_match_table = ti_qspi_match,
	}
};

module_platform_driver(ti_qspi_driver);

MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
MODULE_ALIAS("platform:ti-qspi");