/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#define QUP_CONFIG			0x0000
#define QUP_STATE			0x0004
#define QUP_IO_M_MODES			0x0008
#define QUP_SW_RESET			0x000c
#define QUP_OPERATIONAL			0x0018
#define QUP_ERROR_FLAGS			0x001c
#define QUP_ERROR_FLAGS_EN		0x0020
#define QUP_OPERATIONAL_MASK		0x0028
#define QUP_HW_VERSION			0x0030
#define QUP_MX_OUTPUT_CNT		0x0100
#define QUP_OUTPUT_FIFO			0x0110
#define QUP_MX_WRITE_CNT		0x0150
#define QUP_MX_INPUT_CNT		0x0200
#define QUP_MX_READ_CNT			0x0208
#define QUP_INPUT_FIFO			0x0218

#define SPI_CONFIG			0x0300
#define SPI_IO_CONTROL			0x0304
#define SPI_ERROR_FLAGS			0x0308
#define SPI_ERROR_FLAGS_EN		0x030c
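
/*
 * All offsets above are byte offsets from the controller's MMIO base and are
 * accessed with readl_relaxed()/writel_relaxed(), e.g.
 * readl_relaxed(controller->base + QUP_STATE).
 */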
/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE		(1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE	BIT(13)
#define QUP_CONFIG_NO_INPUT		BIT(7)
#define QUP_CONFIG_NO_OUTPUT		BIT(6)
#define QUP_CONFIG_N			0x001f
/* QUP_STATE fields */
#define QUP_STATE_VALID			BIT(2)
#define QUP_STATE_RESET			0
#define QUP_STATE_RUN			1
#define QUP_STATE_PAUSE			3
#define QUP_STATE_MASK			3
#define QUP_STATE_CLEAR			2

#define QUP_HW_VERSION_2_1_1		0x20010001
/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN		BIT(15)
#define QUP_IO_M_UNPACK_EN		BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT	12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT	10
#define QUP_IO_M_INPUT_MODE_MASK	(3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK	(3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)	(((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)	(((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)	(((x) & (0x07 << 7)) >> 7)
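
/*
 * Illustrative decode of the size fields above, based on how probe() below
 * uses them: a block-size field of 0 means a 4-byte block, otherwise the
 * block is (field * 16) bytes, and the FIFO holds blk_sz * (2 << fifo_size)
 * bytes.  For example, block-size field 1 with FIFO-size field 2 gives a
 * 16-byte block and a 128-byte FIFO.
 */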
#define QUP_IO_M_MODE_FIFO		0
#define QUP_IO_M_MODE_BLOCK		1
#define QUP_IO_M_MODE_DMOV		2
#define QUP_IO_M_MODE_BAM		3
/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG	BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG	BIT(10)
#define QUP_OP_IN_SERVICE_FLAG		BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG		BIT(8)
#define QUP_OP_IN_FIFO_FULL		BIT(7)
#define QUP_OP_OUT_FIFO_FULL		BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY	BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY	BIT(4)
/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN	BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN	BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN	BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN	BIT(2)
/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE		BIT(10)
#define SPI_CONFIG_INPUT_FIRST		BIT(9)
#define SPI_CONFIG_LOOPBACK		BIT(8)
/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS		BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH		BIT(10)
#define SPI_IO_C_MX_CS_MODE		BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0	BIT(4)
#define SPI_IO_C_CS_SELECT(x)		(((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK		0x000c
#define SPI_IO_C_TRISTATE_CS		BIT(1)
#define SPI_IO_C_NO_TRI_STATE		BIT(0)
/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN		BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN		BIT(0)

#define SPI_NUM_CHIPSELECTS		4
/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE			26000000
#define SPI_MAX_RATE			50000000

#define SPI_DELAY_THRESHOLD		1
#define SPI_DELAY_RETRY			10
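
/* Driver state for one QUP SPI controller instance */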
struct spi_qup {
        void __iomem            *base;
        struct device           *dev;
        struct clk              *cclk;  /* core clock */
        struct clk              *iclk;  /* interface clock */
        int                     irq;
        spinlock_t              lock;

        int                     in_fifo_sz;
        int                     out_fifo_sz;
        int                     in_blk_sz;
        int                     out_blk_sz;

        struct spi_transfer     *xfer;
        struct completion       done;
        int                     error;
        int                     w_size; /* bytes per SPI word */
        int                     tx_bytes;
        int                     rx_bytes;
};
static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
        u32 opstate = readl_relaxed(controller->base + QUP_STATE);

        return opstate & QUP_STATE_VALID;
}
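
/*
 * Move the QUP state machine (RESET/RUN/PAUSE) to @state, waiting for the
 * STATE_VALID bit before and after the change and retrying up to
 * SPI_DELAY_RETRY times.
 */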
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
        unsigned long loop = 0;
        u32 cur_state;

        while (!spi_qup_is_valid_state(controller)) {

                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        if (loop)
                dev_dbg(controller->dev, "invalid state for %lu us, state %d\n",
                        loop, state);

        cur_state = readl_relaxed(controller->base + QUP_STATE);
        /*
         * Per spec: for PAUSE_STATE to RESET_STATE, two writes
         * of (b10) are required
         */
        if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
            (state == QUP_STATE_RESET)) {
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
        } else {
                cur_state &= ~QUP_STATE_MASK;
                cur_state |= state;
                writel_relaxed(cur_state, controller->base + QUP_STATE);
        }

        loop = 0;
        while (!spi_qup_is_valid_state(controller)) {

                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);

                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        return 0;
}
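
/*
 * Drain the input FIFO one word at a time until it is empty or the whole
 * transfer has been received, unpacking each word into xfer->rx_buf (if any).
 */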
static void spi_qup_fifo_read(struct spi_qup *controller,
                              struct spi_transfer *xfer)
{
        u8 *rx_buf = xfer->rx_buf;
        u32 word, state;
        int idx, shift, w_size;

        w_size = controller->w_size;

        while (controller->rx_bytes < xfer->len) {

                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
                        break;

                word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

                if (!rx_buf) {
                        controller->rx_bytes += w_size;
                        continue;
                }

                for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
                        /*
                         * The data format depends on bytes per SPI word:
                         *  4 bytes: 0x12345678
                         *  2 bytes: 0x00001234
                         *  1 byte : 0x00000012
                         */
                        shift = BITS_PER_BYTE;
                        shift *= (w_size - idx - 1);
                        rx_buf[controller->rx_bytes] = word >> shift;
                }
        }
}
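
/*
 * Fill the output FIFO from xfer->tx_buf, packing up to w_size bytes into
 * each 32-bit word (MSB first, so byte 0 lands in bits 31..24), until the
 * FIFO is full or the whole transfer has been queued.
 */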
static void spi_qup_fifo_write(struct spi_qup *controller,
                               struct spi_transfer *xfer)
{
        const u8 *tx_buf = xfer->tx_buf;
        u32 word, state, data;
        int idx, w_size;

        w_size = controller->w_size;

        while (controller->tx_bytes < xfer->len) {

                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (state & QUP_OP_OUT_FIFO_FULL)
                        break;

                word = 0;
                for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {

                        if (!tx_buf) {
                                controller->tx_bytes += w_size;
                                break;
                        }

                        data = tx_buf[controller->tx_bytes];
                        word |= data << (BITS_PER_BYTE * (3 - idx));
                }

                writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
        }
}
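
/*
 * QUP interrupt handler: acknowledge and report error flags, service the
 * input/output FIFOs, and complete the transfer once all bytes have been
 * received or an error was seen.
 */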
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
        struct spi_qup *controller = dev_id;
        struct spi_transfer *xfer;
        u32 opflags, qup_err, spi_err;
        unsigned long flags;
        int error = 0;

        spin_lock_irqsave(&controller->lock, flags);
        xfer = controller->xfer;
        controller->xfer = NULL;
        spin_unlock_irqrestore(&controller->lock, flags);

        qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
        spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
        opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

        writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
        writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
        writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

        if (!xfer) {
                dev_err_ratelimited(controller->dev,
                                    "unexpected irq %08x %08x %08x\n",
                                    qup_err, spi_err, opflags);
                return IRQ_HANDLED;
        }

        if (qup_err) {
                if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
                        dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
                        dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
                        dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
                        dev_warn(controller->dev, "INPUT_OVER_RUN\n");

                error = -EIO;
        }

        if (spi_err) {
                if (spi_err & SPI_ERROR_CLK_OVER_RUN)
                        dev_warn(controller->dev, "CLK_OVER_RUN\n");
                if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
                        dev_warn(controller->dev, "CLK_UNDER_RUN\n");

                error = -EIO;
        }

        if (opflags & QUP_OP_IN_SERVICE_FLAG)
                spi_qup_fifo_read(controller, xfer);

        if (opflags & QUP_OP_OUT_SERVICE_FLAG)
                spi_qup_fifo_write(controller, xfer);

        spin_lock_irqsave(&controller->lock, flags);
        controller->error = error;
        controller->xfer = xfer;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (controller->rx_bytes == xfer->len || error)
                complete(&controller->done);

        return IRQ_HANDLED;
}
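
/*
 * Mode selection in spi_qup_io_config() below: transfers that fit in the
 * input FIFO run in FIFO mode (MX_READ/WRITE_CNT programmed), larger
 * transfers use BLOCK mode (MX_INPUT/OUTPUT_CNT) instead.
 */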
/* set clock freq ... bits per word */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(spi->master);
        u32 config, iomode, mode;
        int ret, n_words, w_size;

        if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
                dev_err(controller->dev, "too big size for loopback %d > %d\n",
                        xfer->len, controller->in_fifo_sz);
                return -EIO;
        }

        ret = clk_set_rate(controller->cclk, xfer->speed_hz);
        if (ret) {
                dev_err(controller->dev, "fail to set frequency %d\n",
                        xfer->speed_hz);
                return -EIO;
        }

        if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
                dev_err(controller->dev, "cannot set RESET state\n");
                return -EIO;
        }

        w_size = 4;
        if (xfer->bits_per_word <= 8)
                w_size = 1;
        else if (xfer->bits_per_word <= 16)
                w_size = 2;

        n_words = xfer->len / w_size;
        controller->w_size = w_size;

        if (n_words <= controller->in_fifo_sz) {
                mode = QUP_IO_M_MODE_FIFO;
                writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
                /* must be zero for FIFO */
                writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
        } else {
                mode = QUP_IO_M_MODE_BLOCK;
                writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
                /* must be zero for BLOCK and BAM */
                writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
        }

        iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
        /* Set input and output transfer mode */
        iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
        iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
        iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
        iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

        writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

        config = readl_relaxed(controller->base + SPI_CONFIG);

        if (spi->mode & SPI_LOOP)
                config |= SPI_CONFIG_LOOPBACK;
        else
                config &= ~SPI_CONFIG_LOOPBACK;

        if (spi->mode & SPI_CPHA)
                config &= ~SPI_CONFIG_INPUT_FIRST;
        else
                config |= SPI_CONFIG_INPUT_FIRST;

        /*
         * HS_MODE improves signal stability for spi-clk high rates,
         * but is invalid in loopback mode.
         */
        if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
                config |= SPI_CONFIG_HS_MODE;
        else
                config &= ~SPI_CONFIG_HS_MODE;

        writel_relaxed(config, controller->base + SPI_CONFIG);

        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
        config |= xfer->bits_per_word - 1;
        config |= QUP_CONFIG_SPI_MODE;
        writel_relaxed(config, controller->base + QUP_CONFIG);

        writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);

        return 0;
}
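
/*
 * Manual chip-select control: auto CS toggling is disabled and the CS line
 * selected by spi->chip_select is driven directly, with its level taken
 * from @enable.
 */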
static void spi_qup_set_cs(struct spi_device *spi, bool enable)
{
        struct spi_qup *controller = spi_master_get_devdata(spi->master);
        u32 iocontrol, mask;

        iocontrol = readl_relaxed(controller->base + SPI_IO_CONTROL);

        /* Disable auto CS toggle and use manual */
        iocontrol &= ~SPI_IO_C_MX_CS_MODE;
        iocontrol |= SPI_IO_C_FORCE_CS;

        iocontrol &= ~SPI_IO_C_CS_SELECT_MASK;
        iocontrol |= SPI_IO_C_CS_SELECT(spi->chip_select);

        mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;

        if (enable)
                iocontrol |= mask;
        else
                iocontrol &= ~mask;

        writel_relaxed(iocontrol, controller->base + SPI_IO_CONTROL);
}
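
/*
 * Run a single transfer: configure the controller, preload the output FIFO
 * while the state machine is PAUSEd, switch to RUN, and wait for the IRQ
 * handler to signal completion.  The timeout is roughly 100x the expected
 * transfer time; for example, 100 bytes at 1 MHz is about 1 ms of bus time,
 * so the wait is on the order of 100 ms.
 */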
static int spi_qup_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(master);
        unsigned long timeout, flags;
        int ret;

        ret = spi_qup_io_config(spi, xfer);
        if (ret)
                return ret;

        timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
        timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
        timeout = 100 * msecs_to_jiffies(timeout);

        reinit_completion(&controller->done);

        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer     = xfer;
        controller->error    = 0;
        controller->rx_bytes = 0;
        controller->tx_bytes = 0;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set RUN state\n");
                ret = -EIO;
                goto exit;
        }

        if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
                dev_warn(controller->dev, "cannot set PAUSE state\n");
                ret = -EIO;
                goto exit;
        }

        spi_qup_fifo_write(controller, xfer);

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set EXECUTE state\n");
                ret = -EIO;
                goto exit;
        }

        if (!wait_for_completion_timeout(&controller->done, timeout))
                ret = -ETIMEDOUT;
exit:
        spi_qup_set_state(controller, QUP_STATE_RESET);

        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer = NULL;
        if (!ret)
                ret = controller->error;
        spin_unlock_irqrestore(&controller->lock, flags);

        return ret;
}
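
/*
 * Probe: map the QUP registers, acquire the "core" and "iface" clocks and
 * the IRQ, check the hardware revision, size the FIFOs, reset the block and
 * register the SPI master.
 */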
static int spi_qup_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct clk *iclk, *cclk;
        struct spi_qup *controller;
        struct resource *res;
        struct device *dev;
        void __iomem *base;
        u32 data, max_freq, iomode;
        int ret, irq, size;

        dev = &pdev->dev;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        cclk = devm_clk_get(dev, "core");
        if (IS_ERR(cclk))
                return PTR_ERR(cclk);

        iclk = devm_clk_get(dev, "iface");
        if (IS_ERR(iclk))
                return PTR_ERR(iclk);

        /* This is an optional parameter */
        if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
                max_freq = SPI_MAX_RATE;

        if (!max_freq || max_freq > SPI_MAX_RATE) {
                dev_err(dev, "invalid clock frequency %d\n", max_freq);
                return -ENXIO;
        }

        ret = clk_prepare_enable(cclk);
        if (ret) {
                dev_err(dev, "cannot enable core clock\n");
                return ret;
        }

        ret = clk_prepare_enable(iclk);
        if (ret) {
                clk_disable_unprepare(cclk);
                dev_err(dev, "cannot enable iface clock\n");
                return ret;
        }

        data = readl_relaxed(base + QUP_HW_VERSION);

        if (data < QUP_HW_VERSION_2_1_1) {
                clk_disable_unprepare(cclk);
                clk_disable_unprepare(iclk);
                dev_err(dev, "v.%08x is not supported\n", data);
                return -ENXIO;
        }

        master = spi_alloc_master(dev, sizeof(struct spi_qup));
        if (!master) {
                clk_disable_unprepare(cclk);
                clk_disable_unprepare(iclk);
                dev_err(dev, "cannot allocate master\n");
                return -ENOMEM;
        }

        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
        master->num_chipselect = SPI_NUM_CHIPSELECTS;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        master->max_speed_hz = max_freq;
        master->set_cs = spi_qup_set_cs;
        master->transfer_one = spi_qup_transfer_one;
        master->dev.of_node = pdev->dev.of_node;
        master->auto_runtime_pm = true;

        platform_set_drvdata(pdev, master);

        controller = spi_master_get_devdata(master);

        controller->dev = dev;
        controller->base = base;
        controller->iclk = iclk;
        controller->cclk = cclk;
        controller->irq = irq;

        spin_lock_init(&controller->lock);
        init_completion(&controller->done);

        iomode = readl_relaxed(base + QUP_IO_M_MODES);

        size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->out_blk_sz = size * 16;
        else
                controller->out_blk_sz = 4;

        size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->in_blk_sz = size * 16;
        else
                controller->in_blk_sz = 4;

        size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
        controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

        size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
        controller->in_fifo_sz = controller->in_blk_sz * (2 << size);

        dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
                 data, controller->in_blk_sz, controller->in_fifo_sz,
                 controller->out_blk_sz, controller->out_fifo_sz);

        writel_relaxed(1, base + QUP_SW_RESET);

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret) {
                dev_err(dev, "cannot set RESET state\n");
                goto error;
        }

        writel_relaxed(0, base + QUP_OPERATIONAL);
        writel_relaxed(0, base + QUP_IO_M_MODES);
        writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
        writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
                       base + SPI_ERROR_FLAGS_EN);

        writel_relaxed(0, base + SPI_CONFIG);
        writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

        ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
                               IRQF_TRIGGER_HIGH, pdev->name, controller);
        if (ret)
                goto error;

        ret = devm_spi_register_master(dev, master);
        if (ret)
                goto error;

        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);

        return 0;

error:
        clk_disable_unprepare(cclk);
        clk_disable_unprepare(iclk);
        spi_master_put(master);
        return ret;
}
#ifdef CONFIG_PM_RUNTIME
static int spi_qup_pm_suspend_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Enable clocks auto gating */
        config = readl(controller->base + QUP_CONFIG);
        config |= QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);

        return 0;
}
static int spi_qup_pm_resume_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Disable clocks auto gating */
        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);

        return 0;
}
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);

        return 0;
}
static int spi_qup_resume(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = clk_prepare_enable(controller->iclk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(controller->cclk);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
static int spi_qup_remove(struct platform_device *pdev)
{
        struct spi_master *master = dev_get_drvdata(&pdev->dev);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}
static struct of_device_id spi_qup_dt_match[] = {
        { .compatible = "qcom,spi-qup-v2.1.1", },
        { .compatible = "qcom,spi-qup-v2.2.1", },
        { }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
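
/*
 * Illustrative device-tree node for this driver (not taken from this file);
 * the unit address, reg range, interrupt and clock specifiers below are
 * placeholders, only the property names and compatible strings come from
 * the code above:
 *
 *      spi@f9924000 {
 *              compatible = "qcom,spi-qup-v2.2.1";
 *              reg = <0xf9924000 0x1000>;
 *              interrupts = <0 96 0>;
 *              clocks = <&gcc 123>, <&gcc 124>;
 *              clock-names = "core", "iface";
 *              spi-max-frequency = <25000000>; // optional, at most 50000000
 *              #address-cells = <1>;
 *              #size-cells = <0>;
 *      };
 */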
static const struct dev_pm_ops spi_qup_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
        SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
                           spi_qup_pm_resume_runtime,
                           NULL)
};
static struct platform_driver spi_qup_driver = {
        .driver = {
                .name           = "spi_qup",
                .owner          = THIS_MODULE,
                .pm             = &spi_qup_dev_pm_ops,
                .of_match_table = spi_qup_dt_match,
        },
        .probe = spi_qup_probe,
        .remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");