/*
 * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#define QUP_CONFIG              0x0000
#define QUP_STATE               0x0004
#define QUP_IO_M_MODES          0x0008
#define QUP_SW_RESET            0x000c
#define QUP_OPERATIONAL         0x0018
#define QUP_ERROR_FLAGS         0x001c
#define QUP_ERROR_FLAGS_EN      0x0020
#define QUP_OPERATIONAL_MASK    0x0028
#define QUP_HW_VERSION          0x0030
#define QUP_MX_OUTPUT_CNT       0x0100
#define QUP_OUTPUT_FIFO         0x0110
#define QUP_MX_WRITE_CNT        0x0150
#define QUP_MX_INPUT_CNT        0x0200
#define QUP_MX_READ_CNT         0x0208
#define QUP_INPUT_FIFO          0x0218

#define SPI_CONFIG              0x0300
#define SPI_IO_CONTROL          0x0304
#define SPI_ERROR_FLAGS         0x0308
#define SPI_ERROR_FLAGS_EN      0x030c
/* QUP_CONFIG fields */
#define QUP_CONFIG_SPI_MODE             (1 << 8)
#define QUP_CONFIG_CLOCK_AUTO_GATE      BIT(13)
#define QUP_CONFIG_NO_INPUT             BIT(7)
#define QUP_CONFIG_NO_OUTPUT            BIT(6)
#define QUP_CONFIG_N                    0x001f

/* QUP_STATE fields */
#define QUP_STATE_VALID                 BIT(2)
#define QUP_STATE_RESET                 0
#define QUP_STATE_RUN                   1
#define QUP_STATE_PAUSE                 3
#define QUP_STATE_MASK                  3
#define QUP_STATE_CLEAR                 2

#define QUP_HW_VERSION_2_1_1            0x20010001
/* QUP_IO_M_MODES fields */
#define QUP_IO_M_PACK_EN                BIT(15)
#define QUP_IO_M_UNPACK_EN              BIT(14)
#define QUP_IO_M_INPUT_MODE_MASK_SHIFT  12
#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
#define QUP_IO_M_INPUT_MODE_MASK        (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
#define QUP_IO_M_OUTPUT_MODE_MASK       (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)

#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x)   (((x) & (0x03 << 0)) >> 0)
#define QUP_IO_M_OUTPUT_FIFO_SIZE(x)    (((x) & (0x07 << 2)) >> 2)
#define QUP_IO_M_INPUT_BLOCK_SIZE(x)    (((x) & (0x03 << 5)) >> 5)
#define QUP_IO_M_INPUT_FIFO_SIZE(x)     (((x) & (0x07 << 7)) >> 7)

#define QUP_IO_M_MODE_FIFO              0
#define QUP_IO_M_MODE_BLOCK             1
#define QUP_IO_M_MODE_DMOV              2
#define QUP_IO_M_MODE_BAM               3
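
/*
 * Transfer mode values programmed into the INPUT/OUTPUT_MODE fields of
 * QUP_IO_M_MODES. Only QUP_IO_M_MODE_FIFO and QUP_IO_M_MODE_BLOCK are
 * selected by spi_qup_io_config() in this file; DMOV and BAM are listed
 * for completeness.
 */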
/* QUP_OPERATIONAL fields */
#define QUP_OP_MAX_INPUT_DONE_FLAG      BIT(11)
#define QUP_OP_MAX_OUTPUT_DONE_FLAG     BIT(10)
#define QUP_OP_IN_SERVICE_FLAG          BIT(9)
#define QUP_OP_OUT_SERVICE_FLAG         BIT(8)
#define QUP_OP_IN_FIFO_FULL             BIT(7)
#define QUP_OP_OUT_FIFO_FULL            BIT(6)
#define QUP_OP_IN_FIFO_NOT_EMPTY        BIT(5)
#define QUP_OP_OUT_FIFO_NOT_EMPTY       BIT(4)

/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
#define QUP_ERROR_OUTPUT_OVER_RUN       BIT(5)
#define QUP_ERROR_INPUT_UNDER_RUN       BIT(4)
#define QUP_ERROR_OUTPUT_UNDER_RUN      BIT(3)
#define QUP_ERROR_INPUT_OVER_RUN        BIT(2)

/* SPI_CONFIG fields */
#define SPI_CONFIG_HS_MODE              BIT(10)
#define SPI_CONFIG_INPUT_FIRST          BIT(9)
#define SPI_CONFIG_LOOPBACK             BIT(8)

/* SPI_IO_CONTROL fields */
#define SPI_IO_C_FORCE_CS               BIT(11)
#define SPI_IO_C_CLK_IDLE_HIGH          BIT(10)
#define SPI_IO_C_MX_CS_MODE             BIT(8)
#define SPI_IO_C_CS_N_POLARITY_0        BIT(4)
#define SPI_IO_C_CS_SELECT(x)           (((x) & 3) << 2)
#define SPI_IO_C_CS_SELECT_MASK         0x000c
#define SPI_IO_C_TRISTATE_CS            BIT(1)
#define SPI_IO_C_NO_TRI_STATE           BIT(0)

/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
#define SPI_ERROR_CLK_OVER_RUN          BIT(1)
#define SPI_ERROR_CLK_UNDER_RUN         BIT(0)

#define SPI_NUM_CHIPSELECTS             4
/* high speed mode is when the bus rate is greater than 26 MHz */
#define SPI_HS_MIN_RATE                 26000000
#define SPI_MAX_RATE                    50000000

#define SPI_DELAY_THRESHOLD             1
#define SPI_DELAY_RETRY                 10
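
/*
 * SPI_DELAY_THRESHOLD and SPI_DELAY_RETRY bound the polling loop in
 * spi_qup_set_state(): the driver sleeps roughly SPI_DELAY_THRESHOLD
 * microseconds between reads of QUP_STATE and gives up after
 * SPI_DELAY_RETRY polls without seeing QUP_STATE_VALID.
 */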
        struct clk              *cclk;  /* core clock */
        struct clk              *iclk;  /* interface clock */

        struct spi_transfer     *xfer;
        struct completion       done;
        int                     w_size; /* bytes per SPI word */
static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
{
        u32 opstate = readl_relaxed(controller->base + QUP_STATE);

        return opstate & QUP_STATE_VALID;
}
static int spi_qup_set_state(struct spi_qup *controller, u32 state)
{
        unsigned long loop = 0;
        u32 cur_state;

        while (!spi_qup_is_valid_state(controller)) {
                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        if (loop)
                dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
                        loop, state);

        cur_state = readl_relaxed(controller->base + QUP_STATE);
        /*
         * Per spec: for PAUSE_STATE to RESET_STATE, two writes
         * of (b10) are required
         */
        if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
            (state == QUP_STATE_RESET)) {
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
                writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
        } else {
                cur_state &= ~QUP_STATE_MASK;
                cur_state |= state;
                writel_relaxed(cur_state, controller->base + QUP_STATE);
        }

        loop = 0;
        while (!spi_qup_is_valid_state(controller)) {
                usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
                if (++loop > SPI_DELAY_RETRY)
                        return -EIO;
        }

        return 0;
}
static void spi_qup_fifo_read(struct spi_qup *controller,
                              struct spi_transfer *xfer)
{
        u8 *rx_buf = xfer->rx_buf;
        u32 word, state;
        int idx, shift, w_size;

        w_size = controller->w_size;

        while (controller->rx_bytes < xfer->len) {
                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
                        break;

                word = readl_relaxed(controller->base + QUP_INPUT_FIFO);

                if (!rx_buf) {
                        controller->rx_bytes += w_size;
                        continue;
                }

                for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
                        /*
                         * The data format depends on bytes per SPI word:
                         *  4 bytes: 0x12345678
                         *  2 bytes: 0x00001234
                         *  1 byte : 0x00000012
                         */
                        shift = BITS_PER_BYTE;
                        shift *= (w_size - idx - 1);
                        rx_buf[controller->rx_bytes] = word >> shift;
                }
        }
}
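
/*
 * Illustration of the unpacking above (an example, not taken from the
 * datasheet): with w_size == 2 and an input FIFO word of 0x00001234,
 * idx 0 uses shift 8 and stores 0x12, idx 1 uses shift 0 and stores 0x34,
 * so rx_buf receives the bytes in wire order.
 */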
static void spi_qup_fifo_write(struct spi_qup *controller,
                               struct spi_transfer *xfer)
{
        const u8 *tx_buf = xfer->tx_buf;
        u32 word, state, data;
        int idx, w_size;

        w_size = controller->w_size;

        while (controller->tx_bytes < xfer->len) {
                state = readl_relaxed(controller->base + QUP_OPERATIONAL);
                if (state & QUP_OP_OUT_FIFO_FULL)
                        break;

                word = 0;
                for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
                        if (!tx_buf) {
                                controller->tx_bytes += w_size;
                                break;
                        }

                        data = tx_buf[controller->tx_bytes];
                        word |= data << (BITS_PER_BYTE * (3 - idx));
                }

                writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
        }
}
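
/*
 * On the write side each output word is packed from the top byte down:
 * byte idx of the SPI word is shifted to bit offset BITS_PER_BYTE * (3 - idx),
 * so the first byte always occupies bits 31-24 of the 32-bit FIFO word.
 */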
static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
{
        struct spi_qup *controller = dev_id;
        struct spi_transfer *xfer;
        u32 opflags, qup_err, spi_err;
        unsigned long flags;
        int error = 0;

        spin_lock_irqsave(&controller->lock, flags);
        xfer = controller->xfer;
        controller->xfer = NULL;
        spin_unlock_irqrestore(&controller->lock, flags);

        qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
        spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
        opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);

        writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
        writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
        writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);

        if (!xfer) {
                dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
                                    qup_err, spi_err, opflags);
                return IRQ_HANDLED;
        }

        if (qup_err) {
                if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
                        dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
                        dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
                        dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
                if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
                        dev_warn(controller->dev, "INPUT_OVER_RUN\n");

                error = -EIO;
        }

        if (spi_err) {
                if (spi_err & SPI_ERROR_CLK_OVER_RUN)
                        dev_warn(controller->dev, "CLK_OVER_RUN\n");
                if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
                        dev_warn(controller->dev, "CLK_UNDER_RUN\n");

                error = -EIO;
        }

        if (opflags & QUP_OP_IN_SERVICE_FLAG)
                spi_qup_fifo_read(controller, xfer);

        if (opflags & QUP_OP_OUT_SERVICE_FLAG)
                spi_qup_fifo_write(controller, xfer);

        spin_lock_irqsave(&controller->lock, flags);
        controller->error = error;
        controller->xfer = xfer;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (controller->rx_bytes == xfer->len || error)
                complete(&controller->done);

        return IRQ_HANDLED;
}
/* set clock freq, transfer mode and bits per word for this transfer */
static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(spi->master);
        u32 config, iomode, mode;
        int ret, n_words, w_size;

        if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
                dev_err(controller->dev, "too big size for loopback %d > %d\n",
                        xfer->len, controller->in_fifo_sz);
                return -EIO;
        }

        ret = clk_set_rate(controller->cclk, xfer->speed_hz);
        if (ret) {
                dev_err(controller->dev, "failed to set frequency %d",
                        xfer->speed_hz);
                return -EIO;
        }

        if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
                dev_err(controller->dev, "cannot set RESET state\n");
                return -EIO;
        }

        w_size = 4;
        if (xfer->bits_per_word <= 8)
                w_size = 1;
        else if (xfer->bits_per_word <= 16)
                w_size = 2;

        n_words = xfer->len / w_size;
        controller->w_size = w_size;

        /* transfers that fit in the FIFO use FIFO mode, larger ones use block mode */
        if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
                mode = QUP_IO_M_MODE_FIFO;
                writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
                /* must be zero for FIFO */
                writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
        } else {
                mode = QUP_IO_M_MODE_BLOCK;
                writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
                writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
                /* must be zero for BLOCK and BAM */
                writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
                writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
        }

        iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
        /* Set input and output transfer mode */
        iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
        iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
        iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
        iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);

        writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);

        config = readl_relaxed(controller->base + SPI_CONFIG);

        if (spi->mode & SPI_LOOP)
                config |= SPI_CONFIG_LOOPBACK;
        else
                config &= ~SPI_CONFIG_LOOPBACK;

        if (spi->mode & SPI_CPHA)
                config &= ~SPI_CONFIG_INPUT_FIRST;
        else
                config |= SPI_CONFIG_INPUT_FIRST;

        /*
         * HS_MODE improves signal stability for spi-clk high rates,
         * but is invalid in loopback mode.
         */
        if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
                config |= SPI_CONFIG_HS_MODE;
        else
                config &= ~SPI_CONFIG_HS_MODE;

        writel_relaxed(config, controller->base + SPI_CONFIG);

        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
        config |= xfer->bits_per_word - 1;
        config |= QUP_CONFIG_SPI_MODE;
        writel_relaxed(config, controller->base + QUP_CONFIG);

        writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);

        return 0;
}
static int spi_qup_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        struct spi_qup *controller = spi_master_get_devdata(master);
        unsigned long timeout, flags;
        int ret;

        ret = spi_qup_io_config(spi, xfer);
        if (ret)
                return ret;

        timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
        timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
        timeout = 100 * msecs_to_jiffies(timeout);
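
        /*
         * The timeout above is derived as: speed_hz / MSEC_PER_SEC is the
         * bit rate per millisecond, xfer->len * 8 bits divided by that gives
         * the nominal transfer time in milliseconds, and the factor of 100
         * leaves ample margin before the completion wait is abandoned.
         */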
        reinit_completion(&controller->done);

        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer     = xfer;
        controller->error    = 0;
        controller->rx_bytes = 0;
        controller->tx_bytes = 0;
        spin_unlock_irqrestore(&controller->lock, flags);

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set RUN state\n");
                ret = -EIO;
                goto exit;
        }

        if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
                dev_warn(controller->dev, "cannot set PAUSE state\n");
                ret = -EIO;
                goto exit;
        }

        spi_qup_fifo_write(controller, xfer);

        if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
                dev_warn(controller->dev, "cannot set EXECUTE state\n");
                ret = -EIO;
                goto exit;
        }

        if (!wait_for_completion_timeout(&controller->done, timeout))
                ret = -ETIMEDOUT;
exit:
        spi_qup_set_state(controller, QUP_STATE_RESET);
        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer = NULL;
        if (!ret)
                ret = controller->error;
        spin_unlock_irqrestore(&controller->lock, flags);
        return ret;
}
static int spi_qup_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct clk *iclk, *cclk;
        struct spi_qup *controller;
        struct resource *res;
        struct device *dev;
        void __iomem *base;
        u32 data, max_freq, iomode, num_cs;
        int ret, irq, size;

        dev = &pdev->dev;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        cclk = devm_clk_get(dev, "core");
        if (IS_ERR(cclk))
                return PTR_ERR(cclk);

        iclk = devm_clk_get(dev, "iface");
        if (IS_ERR(iclk))
                return PTR_ERR(iclk);

        /* This is an optional property */
        if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
                max_freq = SPI_MAX_RATE;

        if (!max_freq || max_freq > SPI_MAX_RATE) {
                dev_err(dev, "invalid clock frequency %d\n", max_freq);
                return -ENXIO;
        }

        ret = clk_prepare_enable(cclk);
        if (ret) {
                dev_err(dev, "cannot enable core clock\n");
                return ret;
        }

        ret = clk_prepare_enable(iclk);
        if (ret) {
                clk_disable_unprepare(cclk);
                dev_err(dev, "cannot enable iface clock\n");
                return ret;
        }

        data = readl_relaxed(base + QUP_HW_VERSION);

        if (data < QUP_HW_VERSION_2_1_1) {
                clk_disable_unprepare(cclk);
                clk_disable_unprepare(iclk);
                dev_err(dev, "v.%08x is not supported\n", data);
                return -ENXIO;
        }

        master = spi_alloc_master(dev, sizeof(struct spi_qup));
        if (!master) {
                clk_disable_unprepare(cclk);
                clk_disable_unprepare(iclk);
                dev_err(dev, "cannot allocate master\n");
                return -ENOMEM;
        }

        /* use num-cs unless not present or out of range */
        if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
            num_cs > SPI_NUM_CHIPSELECTS)
                master->num_chipselect = SPI_NUM_CHIPSELECTS;
        else
                master->num_chipselect = num_cs;

        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        master->max_speed_hz = max_freq;
        master->transfer_one = spi_qup_transfer_one;
        master->dev.of_node = pdev->dev.of_node;
        master->auto_runtime_pm = true;

        platform_set_drvdata(pdev, master);

        controller = spi_master_get_devdata(master);

        controller->dev = dev;
        controller->base = base;
        controller->iclk = iclk;
        controller->cclk = cclk;
        controller->irq = irq;

        spin_lock_init(&controller->lock);
        init_completion(&controller->done);

        iomode = readl_relaxed(base + QUP_IO_M_MODES);

        size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->out_blk_sz = size * 16;
        else
                controller->out_blk_sz = 4;

        size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
        if (size)
                controller->in_blk_sz = size * 16;
        else
                controller->in_blk_sz = 4;

        size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
        controller->out_fifo_sz = controller->out_blk_sz * (2 << size);

        size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
        controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
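
        /*
         * The BLOCK_SIZE and FIFO_SIZE fields read from QUP_IO_M_MODES are
         * encoded values, as the decode above shows: a block size field of N
         * means N * 16 bytes (4 bytes when N is 0), and the FIFO holds
         * block size * (2 << field) bytes. For example, a 16-byte block with
         * a FIFO field of 3 gives a 256-byte FIFO.
         */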
        dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
                 data, controller->in_blk_sz, controller->in_fifo_sz,
                 controller->out_blk_sz, controller->out_fifo_sz);

        writel_relaxed(1, base + QUP_SW_RESET);

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret) {
                dev_err(dev, "cannot set RESET state\n");
                goto error;
        }

        writel_relaxed(0, base + QUP_OPERATIONAL);
        writel_relaxed(0, base + QUP_IO_M_MODES);
        writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
        writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
                       base + SPI_ERROR_FLAGS_EN);

        writel_relaxed(0, base + SPI_CONFIG);
        writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);

        ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
                               IRQF_TRIGGER_HIGH, pdev->name, controller);
        if (ret)
                goto error;

        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);

        ret = devm_spi_register_master(dev, master);
        if (ret)
                goto disable_pm;

        return 0;

disable_pm:
        pm_runtime_disable(&pdev->dev);
error:
        clk_disable_unprepare(cclk);
        clk_disable_unprepare(iclk);
        spi_master_put(master);
        return ret;
}
#ifdef CONFIG_PM_RUNTIME
static int spi_qup_pm_suspend_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Enable clock auto-gating */
        config = readl(controller->base + QUP_CONFIG);
        config |= QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);

        return 0;
}

static int spi_qup_pm_resume_runtime(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        u32 config;

        /* Disable clock auto-gating */
        config = readl_relaxed(controller->base + QUP_CONFIG);
        config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
        writel_relaxed(config, controller->base + QUP_CONFIG);

        return 0;
}
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
static int spi_qup_suspend(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);
        return 0;
}

static int spi_qup_resume(struct device *device)
{
        struct spi_master *master = dev_get_drvdata(device);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = clk_prepare_enable(controller->iclk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(controller->cclk);
        if (ret)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
static int spi_qup_remove(struct platform_device *pdev)
{
        struct spi_master *master = dev_get_drvdata(&pdev->dev);
        struct spi_qup *controller = spi_master_get_devdata(master);
        int ret;

        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                return ret;

        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret)
                return ret;

        clk_disable_unprepare(controller->cclk);
        clk_disable_unprepare(controller->iclk);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}
static const struct of_device_id spi_qup_dt_match[] = {
        { .compatible = "qcom,spi-qup-v2.1.1", },
        { .compatible = "qcom,spi-qup-v2.2.1", },
        { }
};
MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
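
/*
 * Hypothetical device tree node for this driver (the unit address, interrupt
 * specifier and clock phandles below are placeholders, not taken from any
 * real board file); it supplies the "core"/"iface" clocks and the optional
 * "spi-max-frequency" and "num-cs" properties read in spi_qup_probe():
 *
 *      spi@f9924000 {
 *              compatible = "qcom,spi-qup-v2.2.1";
 *              reg = <0xf9924000 0x1000>;
 *              interrupts = <0 96 0>;
 *              clocks = <&gcc 123>, <&gcc 124>;
 *              clock-names = "core", "iface";
 *              spi-max-frequency = <25000000>;
 *              num-cs = <1>;
 *      };
 */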
static const struct dev_pm_ops spi_qup_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
        SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
                           spi_qup_pm_resume_runtime,
                           NULL)
};
static struct platform_driver spi_qup_driver = {
        .driver = {
                .name           = "spi_qup",
                .owner          = THIS_MODULE,
                .pm             = &spi_qup_dev_pm_ops,
                .of_match_table = spi_qup_dt_match,
        },
        .probe  = spi_qup_probe,
        .remove = spi_qup_remove,
};
module_platform_driver(spi_qup_driver);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_qup");