/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define QUP_CONFIG		0x0000
#define QUP_STATE		0x0004
#define QUP_IO_M_MODES		0x0008
#define QUP_SW_RESET		0x000c
#define QUP_OPERATIONAL		0x0018
#define QUP_ERROR_FLAGS		0x001c
#define QUP_ERROR_FLAGS_EN	0x0020
#define QUP_OPERATIONAL_MASK	0x0028
#define QUP_HW_VERSION		0x0030
#define QUP_MX_OUTPUT_CNT	0x0100
#define QUP_OUTPUT_FIFO		0x0110
#define QUP_MX_WRITE_CNT	0x0150
#define QUP_MX_INPUT_CNT	0x0200
#define QUP_MX_READ_CNT		0x0208
#define QUP_INPUT_FIFO		0x0218

#define SPI_CONFIG		0x0300
#define SPI_IO_CONTROL		0x0304
#define SPI_ERROR_FLAGS		0x0308
#define SPI_ERROR_FLAGS_EN	0x030c

/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f
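
/*
 * Editorial note: QUP_CONFIG_N is an N-1 encoding of the word size;
 * spi_qup_io_config() below programs it with bits_per_word - 1, so an
 * 8-bit word writes 7 into these five bits.
 */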

/* QUP_STATE fields */
#define QUP_STATE_VALID		BIT(2)
#define QUP_STATE_RESET		0
#define QUP_STATE_RUN		1
#define QUP_STATE_PAUSE		3
#define QUP_STATE_MASK		3
#define QUP_STATE_CLEAR		2

#define QUP_HW_VERSION_2_1_1	0x20010001

/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)
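
/*
 * Worked example (editorial): spi_qup_probe() turns these fields into
 * byte sizes. A block-size field of 1 means 1 * 16 = 16 bytes (a field
 * of 0 means 4 bytes), and a FIFO-size field of 2 means
 * block_size * (2 << 2) = 8 blocks, i.e. a 128-byte FIFO.
 */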

#define QUP_IO_M_MODE_FIFO	0
#define QUP_IO_M_MODE_BLOCK	1
#define QUP_IO_M_MODE_DMOV	2
#define QUP_IO_M_MODE_BAM	3

/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE	BIT(10)
#define SPI_CONFIG_INPUT_FIRST	BIT(9)
#define SPI_CONFIG_LOOPBACK	BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS	BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH	BIT(10)
#define SPI_IO_C_MX_CS_MODE	BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)	(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK	0x000c
#define SPI_IO_C_TRISTATE_CS	BIT(1)
#define SPI_IO_C_NO_TRI_STATE	BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN	BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN	BIT(0)

#define SPI_NUM_CHIPSELECTS	4

#define SPI_MAX_DMA_XFER	(SZ_64K - 64)

/* high speed mode is when bus rate is greater than 26MHz */
#define SPI_HS_MIN_RATE		26000000
#define SPI_MAX_RATE		50000000

#define SPI_DELAY_THRESHOLD	1
#define SPI_DELAY_RETRY		10
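
/*
 * Editorial note: spi_qup_set_state() polls with
 * usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2), i.e. 1-2 us
 * per attempt, and gives up after SPI_DELAY_RETRY (10) attempts, so a
 * state change is allowed roughly 10-20 us to become valid.
 */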

struct spi_qup {
	void __iomem		*base;
	struct device		*dev;
	struct clk		*cclk;	/* core clock */
	struct clk		*iclk;	/* interface clock */
	int			irq;
	spinlock_t		lock;

	int			in_fifo_sz;
	int			out_fifo_sz;
	int			in_blk_sz;
	int			out_blk_sz;

	struct spi_transfer	*xfer;
	struct completion	done;
	int			error;
	int			w_size;	/* bytes per SPI word */
	int			n_words;
	int			tx_bytes;
	int			rx_bytes;
	int			qup_v1;

	int			use_dma;
	struct dma_slave_config	rx_conf;
	struct dma_slave_config	tx_conf;
};

static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
	u32 opstate = readl_relaxed(controller->base + QUP_STATE);

	return opstate & QUP_STATE_VALID;
}

static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
	unsigned long loop;
	u32 cur_state;

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {
		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	if (loop)
		dev_dbg(controller->dev, "invalid state for %ld us, state %d\n",
			loop, state);

	cur_state = readl_relaxed(controller->base + QUP_STATE);
	/*
	 * Per spec: for PAUSE_STATE to RESET_STATE, two writes
	 * of (b10) are required
	 */
	if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
	    (state == QUP_STATE_RESET)) {
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
		writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
	} else {
		cur_state &= ~QUP_STATE_MASK;
		cur_state |= state;
		writel_relaxed(cur_state, controller->base + QUP_STATE);
	}

	loop = 0;
	while (!spi_qup_is_valid_state(controller)) {
		usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

		if (++loop > SPI_DELAY_RETRY)
			return -EIO;
	}

	return 0;
}
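
/*
 * Typical state sequence (editorial summary of the calls below): a PIO
 * transfer goes RESET -> RUN -> PAUSE in spi_qup_do_pio(), the output
 * FIFO is pre-filled while paused, and spi_qup_transfer_one() then moves
 * the controller back to RUN to clock the data out.
 */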

static void spi_qup_fifo_read(struct spi_qup *controller,
			      struct spi_transfer *xfer)
{
	u8 *rx_buf = xfer->rx_buf;
	u32 word, state;
	int idx, shift, w_size;

	w_size = controller->w_size;

	while (controller->rx_bytes < xfer->len) {
		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (!(state & QUP_OP_IN_FIFO_NOT_EMPTY))
			break;

		word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

		if (!rx_buf) {
			controller->rx_bytes += w_size;
			continue;
		}

		for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
			/*
			 * The data format depends on bytes per SPI word:
			 *  4 bytes: 0x12345678
			 *  2 bytes: 0x00001234
			 *  1 byte : 0x00000012
			 */
			shift = BITS_PER_BYTE;
			shift *= (w_size - idx - 1);
			rx_buf[controller->rx_bytes] = word >> shift;
		}
	}
}
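
/*
 * Worked example (editorial): with w_size == 2 a FIFO word of 0x00001234
 * is unpacked MSB-first; idx 0 uses shift 8 and stores 0x12, idx 1 uses
 * shift 0 and stores 0x34, matching the format table in the loop above.
 */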

static void spi_qup_fifo_write(struct spi_qup *controller,
			       struct spi_transfer *xfer)
{
	const u8 *tx_buf = xfer->tx_buf;
	u32 word, state, data;
	int idx, w_size;

	w_size = controller->w_size;

	while (controller->tx_bytes < xfer->len) {
		state = readl_relaxed(controller->base + QUP_OPERATIONAL);
		if (state & QUP_OP_OUT_FIFO_FULL)
			break;

		word = 0;
		for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
			if (!tx_buf) {
				controller->tx_bytes += w_size;
				break;
			}

			data = tx_buf[controller->tx_bytes];
			word |= data << (BITS_PER_BYTE * (3 - idx));
		}

		writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
	}
}
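
/*
 * Worked example (editorial): the writer mirrors the reader's format but
 * always packs from the top byte: with w_size == 1 a byte 0xAB is written
 * as 0xAB000000, since data << (BITS_PER_BYTE * (3 - 0)) shifts it by 24.
 */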

static void spi_qup_dma_done(void *data)
{
	struct spi_qup *qup = data;

	complete(&qup->done);
}

static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback callback)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	unsigned int nents;

	if (dir == DMA_MEM_TO_DEV) {
		chan = master->dma_tx;
		nents = xfer->tx_sg.nents;
		sgl = xfer->tx_sg.sgl;
	} else {
		chan = master->dma_rx;
		nents = xfer->rx_sg.nents;
		sgl = xfer->rx_sg.sgl;
	}

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = callback;
	desc->callback_param = qup;

	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}
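
/*
 * Editorial summary of the dmaengine flow used here: a descriptor is
 * prepared with dmaengine_prep_slave_sg(), the completion callback is
 * attached, dmaengine_submit() queues it, and the transfer only starts
 * once spi_qup_do_dma() calls dma_async_issue_pending() on the channel.
 */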

static void spi_qup_dma_terminate(struct spi_master *master,
				  struct spi_transfer *xfer)
{
	if (xfer->tx_buf)
		dmaengine_terminate_all(master->dma_tx);
	if (xfer->rx_buf)
		dmaengine_terminate_all(master->dma_rx);
}

static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
{
	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
	int ret;

	if (xfer->rx_buf)
		rx_done = spi_qup_dma_done;
	else if (xfer->tx_buf)
		tx_done = spi_qup_dma_done;

	if (xfer->rx_buf) {
		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
		if (ret)
			return ret;

		dma_async_issue_pending(master->dma_rx);
	}

	if (xfer->tx_buf) {
		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
		if (ret)
			return ret;

		dma_async_issue_pending(master->dma_tx);
	}

	return 0;
}

static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	int ret;

	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
	if (ret) {
		dev_warn(qup->dev, "cannot set RUN state\n");
		return ret;
	}

	ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
	if (ret) {
		dev_warn(qup->dev, "cannot set PAUSE state\n");
		return ret;
	}

	spi_qup_fifo_write(qup, xfer);

	return 0;
}

static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
	struct spi_qup *controller = dev_id;
	struct spi_transfer *xfer;
	u32 opflags, qup_err, spi_err;
	unsigned long flags;
	int error = 0;

	spin_lock_irqsave(&controller->lock, flags);
	xfer = controller->xfer;
	controller->xfer = NULL;
	spin_unlock_irqrestore(&controller->lock, flags);

	qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
	spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
	opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

	writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
	writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
	writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

	if (!xfer) {
		dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
				    qup_err, spi_err, opflags);
		return IRQ_HANDLED;
	}

	if (qup_err) {
		if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
			dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
			dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
			dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
		if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
			dev_warn(controller->dev, "INPUT_OVER_RUN\n");

		error = -EIO;
	}

	if (spi_err) {
		if (spi_err & SPI_ERROR_CLK_OVER_RUN)
			dev_warn(controller->dev, "CLK_OVER_RUN\n");
		if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
			dev_warn(controller->dev, "CLK_UNDER_RUN\n");

		error = -EIO;
	}

	if (!controller->use_dma) {
		if (opflags & QUP_OP_IN_SERVICE_FLAG)
			spi_qup_fifo_read(controller, xfer);

		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
			spi_qup_fifo_write(controller, xfer);
	}

	spin_lock_irqsave(&controller->lock, flags);
	controller->error = error;
	controller->xfer = xfer;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (controller->rx_bytes == xfer->len || error)
		complete(&controller->done);

	return IRQ_HANDLED;
}

static int spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	u32 mode;

	qup->w_size = 4;

	if (xfer->bits_per_word <= 8)
		qup->w_size = 1;
	else if (xfer->bits_per_word <= 16)
		qup->w_size = 2;

	qup->n_words = xfer->len / qup->w_size;

	if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
		mode = QUP_IO_M_MODE_FIFO;
	else
		mode = QUP_IO_M_MODE_BLOCK;

	return mode;
}
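
/*
 * Worked example (editorial, numbers assumed): a 32-byte transfer at 8
 * bits per word gives w_size = 1 and n_words = 32; with a 64-byte input
 * FIFO that is 64 / sizeof(u32) = 16 words, so 32 > 16 selects
 * QUP_IO_M_MODE_BLOCK rather than FIFO mode.
 */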

/* set clock freq ... bits per word */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(spi->master);
	u32 config, iomode, mode, control;
	int ret, n_words;

	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
		dev_err(controller->dev, "too big size for loopback %d > %d\n",
			xfer->len, controller->in_fifo_sz);
		return -EIO;
	}

	ret = clk_set_rate(controller->cclk, xfer->speed_hz);
	if (ret) {
		dev_err(controller->dev, "failed to set frequency %d",
			xfer->speed_hz);
		return -EIO;
	}

	if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
		dev_err(controller->dev, "cannot set RESET state\n");
		return -EIO;
	}

	mode = spi_qup_get_mode(spi->master, xfer);
	n_words = controller->n_words;

	if (mode == QUP_IO_M_MODE_FIFO) {
		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
		/* must be zero for FIFO */
		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
	} else if (!controller->use_dma) {
		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
		/* must be zero for BLOCK and BAM */
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
	} else {
		mode = QUP_IO_M_MODE_BAM;
		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);

		if (!controller->qup_v1) {
			void __iomem *input_cnt;

			input_cnt = controller->base + QUP_MX_INPUT_CNT;
			/*
			 * for DMA transfers, both QUP_MX_INPUT_CNT and
			 * QUP_MX_OUTPUT_CNT must be zero in all cases but one.
			 * That case is a non-balanced transfer when there is
			 * only a rx_buf.
			 */
			if (xfer->tx_buf)
				writel_relaxed(0, input_cnt);
			else
				writel_relaxed(n_words, input_cnt);

			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
		}
	}

	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
	/* Set input and output transfer mode */
	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);

	if (!controller->use_dma)
		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
	else
		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;

	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

	writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

	control = readl_relaxed(controller->base + SPI_IO_CONTROL);

	if (spi->mode & SPI_CPOL)
		control |= SPI_IO_C_CLK_IDLE_HIGH;
	else
		control &= ~SPI_IO_C_CLK_IDLE_HIGH;

	writel_relaxed(control, controller->base + SPI_IO_CONTROL);

	config = readl_relaxed(controller->base + SPI_CONFIG);

	if (spi->mode & SPI_LOOP)
		config |= SPI_CONFIG_LOOPBACK;
	else
		config &= ~SPI_CONFIG_LOOPBACK;

	if (spi->mode & SPI_CPHA)
		config &= ~SPI_CONFIG_INPUT_FIRST;
	else
		config |= SPI_CONFIG_INPUT_FIRST;

	/*
	 * HS_MODE improves signal stability for spi-clk high rates,
	 * but is invalid in loop back mode.
	 */
	if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
		config |= SPI_CONFIG_HS_MODE;
	else
		config &= ~SPI_CONFIG_HS_MODE;

	writel_relaxed(config, controller->base + SPI_CONFIG);

	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
	config |= xfer->bits_per_word - 1;
	config |= QUP_CONFIG_SPI_MODE;

	if (controller->use_dma) {
		if (!xfer->tx_buf)
			config |= QUP_CONFIG_NO_OUTPUT;
		if (!xfer->rx_buf)
			config |= QUP_CONFIG_NO_INPUT;
	}

	writel_relaxed(config, controller->base + QUP_CONFIG);

	/* only write to OPERATIONAL_MASK when register is present */
	if (!controller->qup_v1) {
		u32 mask = 0;

		/*
		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
		 * status change in BAM mode
		 */
		if (mode == QUP_IO_M_MODE_BAM)
			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;

		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
	}

	return 0;
}
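
/*
 * Editorial example: for SPI mode 3 (SPI_CPOL | SPI_CPHA) the code above
 * sets SPI_IO_C_CLK_IDLE_HIGH and clears SPI_CONFIG_INPUT_FIRST: the
 * clock idles high and input is sampled on the second clock edge.
 */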

static int spi_qup_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct spi_qup *controller = spi_master_get_devdata(master);
	unsigned long timeout, flags;
	int ret;

	ret = spi_qup_io_config(spi, xfer);
	if (ret)
		return ret;

	timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
	timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
	timeout = 100 * msecs_to_jiffies(timeout);

	reinit_completion(&controller->done);

	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer     = xfer;
	controller->error    = 0;
	controller->rx_bytes = 0;
	controller->tx_bytes = 0;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (controller->use_dma)
		ret = spi_qup_do_dma(master, xfer);
	else
		ret = spi_qup_do_pio(master, xfer);

	if (ret)
		goto exit;

	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
		dev_warn(controller->dev, "cannot set RUN state\n");
		goto exit;
	}

	if (!wait_for_completion_timeout(&controller->done, timeout))
		ret = -ETIMEDOUT;

exit:
	spi_qup_set_state(controller, QUP_STATE_RESET);
	spin_lock_irqsave(&controller->lock, flags);
	controller->xfer = NULL;
	if (!ret)
		ret = controller->error;
	spin_unlock_irqrestore(&controller->lock, flags);

	if (ret && controller->use_dma)
		spi_qup_dma_terminate(master, xfer);

	return ret;
}
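
/*
 * Worked example for the timeout above (editorial, values assumed): a
 * 100-byte transfer at 1 MHz gives DIV_ROUND_UP(1000000, 1000) = 1000
 * clocks per ms, then DIV_ROUND_UP(800 bits, 1000) = 1 ms of bus time,
 * padded to 100 * msecs_to_jiffies(1), a generous 100x margin.
 */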

static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct spi_qup *qup = spi_master_get_devdata(master);
	size_t dma_align = dma_get_cache_alignment();
	int mode;

	qup->use_dma = 0;

	if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
	    IS_ERR_OR_NULL(master->dma_rx) ||
	    !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
		return false;

	if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
	    IS_ERR_OR_NULL(master->dma_tx) ||
	    !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
		return false;

	mode = spi_qup_get_mode(master, xfer);
	if (mode == QUP_IO_M_MODE_FIFO)
		return false;

	qup->use_dma = 1;

	return true;
}

static void spi_qup_release_dma(struct spi_master *master)
{
	if (!IS_ERR_OR_NULL(master->dma_rx))
		dma_release_channel(master->dma_rx);
	if (!IS_ERR_OR_NULL(master->dma_tx))
		dma_release_channel(master->dma_tx);
}

static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
{
	struct spi_qup *spi = spi_master_get_devdata(master);
	struct dma_slave_config *rx_conf = &spi->rx_conf,
				*tx_conf = &spi->tx_conf;
	struct device *dev = spi->dev;
	int ret;

	/* allocate dma resources, if available */
	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(master->dma_rx))
		return PTR_ERR(master->dma_rx);

	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(master->dma_tx)) {
		ret = PTR_ERR(master->dma_tx);
		goto err_tx;
	}

	/* set DMA parameters */
	rx_conf->direction = DMA_DEV_TO_MEM;
	rx_conf->device_fc = 1;
	rx_conf->src_addr = base + QUP_INPUT_FIFO;
	rx_conf->src_maxburst = spi->in_blk_sz;

	tx_conf->direction = DMA_MEM_TO_DEV;
	tx_conf->device_fc = 1;
	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
	tx_conf->dst_maxburst = spi->out_blk_sz;

	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
	if (ret) {
		dev_err(dev, "failed to configure RX channel\n");
		goto err;
	}

	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
	if (ret) {
		dev_err(dev, "failed to configure TX channel\n");
		goto err;
	}

	return 0;

err:
	dma_release_channel(master->dma_tx);
err_tx:
	dma_release_channel(master->dma_rx);

	return ret;
}
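
/*
 * Editorial note on the settings above: device_fc = 1 requests
 * peripheral flow control, and the maxburst values match the QUP block
 * sizes read back in spi_qup_probe(), so each DMA burst moves exactly
 * one input/output block through the FIFO registers.
 */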

static int spi_qup_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct clk *iclk, *cclk;
	struct spi_qup *controller;
	struct resource *res;
	struct device *dev;
	void __iomem *base;
	u32 max_freq, iomode, num_cs;
	int ret, irq, size;

	dev = &pdev->dev;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	cclk = devm_clk_get(dev, "core");
	if (IS_ERR(cclk))
		return PTR_ERR(cclk);

	iclk = devm_clk_get(dev, "iface");
	if (IS_ERR(iclk))
		return PTR_ERR(iclk);

	/* This is an optional parameter */
	if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
		max_freq = SPI_MAX_RATE;

	if (!max_freq || max_freq > SPI_MAX_RATE) {
		dev_err(dev, "invalid clock frequency %d\n", max_freq);
		return -ENXIO;
	}

	ret = clk_prepare_enable(cclk);
	if (ret) {
		dev_err(dev, "cannot enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(iclk);
	if (ret) {
		clk_disable_unprepare(cclk);
		dev_err(dev, "cannot enable iface clock\n");
		return ret;
	}

	master = spi_alloc_master(dev, sizeof(struct spi_qup));
	if (!master) {
		clk_disable_unprepare(cclk);
		clk_disable_unprepare(iclk);
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	/* use num-cs unless not present or out of range */
	if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
	    num_cs > SPI_NUM_CHIPSELECTS)
		master->num_chipselect = SPI_NUM_CHIPSELECTS;
	else
		master->num_chipselect = num_cs;

	master->bus_num = pdev->id;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->max_speed_hz = max_freq;
	master->transfer_one = spi_qup_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->auto_runtime_pm = true;
	master->dma_alignment = dma_get_cache_alignment();
	master->max_dma_len = SPI_MAX_DMA_XFER;

	platform_set_drvdata(pdev, master);

	controller = spi_master_get_devdata(master);

	controller->dev = dev;
	controller->base = base;
	controller->iclk = iclk;
	controller->cclk = cclk;
	controller->irq = irq;

	ret = spi_qup_init_dma(master, res->start);
	if (ret == -EPROBE_DEFER)
		goto error;
	else if (!ret)
		master->can_dma = spi_qup_can_dma;

	/* set v1 flag if device is version 1 */
	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
		controller->qup_v1 = 1;

	spin_lock_init(&controller->lock);
	init_completion(&controller->done);

	iomode = readl_relaxed(base + QUP_IO_M_MODES);

	size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->out_blk_sz = size * 16;
	else
		controller->out_blk_sz = 4;

	size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
	if (size)
		controller->in_blk_sz = size * 16;
	else
		controller->in_blk_sz = 4;

	size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
	controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
		 controller->in_blk_sz, controller->in_fifo_sz,
		 controller->out_blk_sz, controller->out_fifo_sz);

	writel_relaxed(1, base + QUP_SW_RESET);

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret) {
		dev_err(dev, "cannot set RESET state\n");
		goto error_dma;
	}

	writel_relaxed(0, base + QUP_OPERATIONAL);
	writel_relaxed(0, base + QUP_IO_M_MODES);

	if (!controller->qup_v1)
		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);

	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
		       base + SPI_ERROR_FLAGS_EN);

	/* if earlier version of the QUP, disable INPUT_OVERRUN */
	if (controller->qup_v1)
		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
			       QUP_ERROR_INPUT_UNDER_RUN |
			       QUP_ERROR_OUTPUT_UNDER_RUN,
			       base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(0, base + SPI_CONFIG);
	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
			       IRQF_TRIGGER_HIGH, pdev->name, controller);
	if (ret)
		goto error_dma;

	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto disable_pm;

	return 0;

disable_pm:
	pm_runtime_disable(&pdev->dev);
error_dma:
	spi_qup_release_dma(master);
error:
	clk_disable_unprepare(cclk);
	clk_disable_unprepare(iclk);
	spi_master_put(master);

	return ret;
}

#ifdef CONFIG_PM
static int spi_qup_pm_suspend_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Enable clocks auto gating */
	config = readl(controller->base + QUP_CONFIG);
	config |= QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	u32 config;

	/* Disable clocks auto gating */
	config = readl_relaxed(controller->base + QUP_CONFIG);
	config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
	writel_relaxed(config, controller->base + QUP_CONFIG);

	return 0;
}
#endif /* CONFIG_PM */

#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	return 0;
}

static int spi_qup_resume(struct device *device)
{
	struct spi_master *master = dev_get_drvdata(device);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = clk_prepare_enable(controller->iclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(controller->cclk);
	if (ret)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static int spi_qup_remove(struct platform_device *pdev)
{
	struct spi_master *master = dev_get_drvdata(&pdev->dev);
	struct spi_qup *controller = spi_master_get_devdata(master);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
	if (ret)
		return ret;

	spi_qup_release_dma(master);

	clk_disable_unprepare(controller->cclk);
	clk_disable_unprepare(controller->iclk);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id spi_qup_dt_match[] = {
	{ .compatible = "qcom,spi-qup-v1.1.1", },
	{ .compatible = "qcom,spi-qup-v2.1.1", },
	{ .compatible = "qcom,spi-qup-v2.2.1", },
	{ }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
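
/*
 * Illustrative devicetree node (editorial sketch, not from this file; the
 * compatible string, clock names, DMA names and optional properties are
 * taken from the code above, while addresses and phandles are made up):
 *
 *	spi@f9924000 {
 *		compatible = "qcom,spi-qup-v2.1.1";
 *		reg = <0xf9924000 0x1000>;
 *		interrupts = <0 96 0>;
 *		clocks = <&gcc 123>, <&gcc 124>;
 *		clock-names = "core", "iface";
 *		dmas = <&blsp_dma 13>, <&blsp_dma 12>;
 *		dma-names = "rx", "tx";
 *		spi-max-frequency = <19200000>;
 *		num-cs = <1>;
 *	};
 */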

static const struct dev_pm_ops spi_qup_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
	SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
			   spi_qup_pm_resume_runtime,
			   NULL)
};

static struct platform_driver spi_qup_driver = {
	.driver = {
		.name		= "spi_qup",
		.pm		= &spi_qup_dev_pm_ops,
		.of_match_table	= spi_qup_dt_match,
	},
	.probe	= spi_qup_probe,
	.remove	= spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");