/*
 * OMAP2 McSPI controller driver
 *
 * Copyright (C) 2005, 2006 Nokia Corporation
 * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
 *		Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gcd.h>

#include <linux/spi/spi.h>

#include <linux/platform_data/spi-omap2-mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ		48000000
#define OMAP2_MCSPI_MAX_FIFODEPTH	64
#define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
#define SPI_AUTOSUSPEND_TIMEOUT		2000

#define OMAP2_MCSPI_REVISION		0x00
#define OMAP2_MCSPI_SYSSTATUS		0x14
#define OMAP2_MCSPI_IRQSTATUS		0x18
#define OMAP2_MCSPI_IRQENABLE		0x1c
#define OMAP2_MCSPI_WAKEUPENABLE	0x20
#define OMAP2_MCSPI_SYST		0x24
#define OMAP2_MCSPI_MODULCTRL		0x28
#define OMAP2_MCSPI_XFERLEVEL		0x7c
/* per-channel banks, 0x14 bytes each, first is: */
#define OMAP2_MCSPI_CHCONF0		0x2c
#define OMAP2_MCSPI_CHSTAT0		0x30
#define OMAP2_MCSPI_CHCTRL0		0x34
#define OMAP2_MCSPI_TX0			0x38
#define OMAP2_MCSPI_RX0			0x3c
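
/*
 * Each chip select owns one such 0x14-byte bank; the per-CS register base is
 * therefore mcspi->base + chip_select * 0x14, matching the computation done
 * later in omap2_mcspi_setup().
 */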
/* per-register bitmasks: */
#define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)

#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)

#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
#define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
#define OMAP2_MCSPI_CHCONF_FFER		BIT(28)

#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
#define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)

#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)

#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;

	int dma_tx_sync_dev;
	int dma_rx_sync_dev;

	struct completion dma_tx_completion;
	struct completion dma_rx_completion;

	char dma_rx_ch_name[14];
	char dma_tx_ch_name[14];
};
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES			160
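
/*
 * A transfer only takes the DMA path when both DMA channels were obtained and
 * either the message is already DMA-mapped or t->len >= DMA_MIN_BYTES;
 * shorter transfers go through omap2_mcspi_txrx_pio() (see omap2_mcspi_work()).
 */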
/*
 * Used for context save and restore, structure members to be updated whenever
 * corresponding registers are modified.
 */
struct omap2_mcspi_regs {
	u32 modulctrl;
	u32 wakeupenable;
	struct list_head cs;
};

struct omap2_mcspi {
	struct spi_master	*master;
	/* Virtual base address of the controller */
	void __iomem		*base;
	unsigned long		phys;
	/* SPI1 has 4 channels, while SPI2 has 2 */
	struct omap2_mcspi_dma	*dma_channels;
	struct device		*dev;
	struct omap2_mcspi_regs ctx;
	int			fifo_depth;
	unsigned int		pin_dir:1;
};
struct omap2_mcspi_cs {
	void __iomem		*base;
	unsigned long		phys;
	int			word_len;
	struct list_head	node;
	/* Context save and restore shadow register */
	u32			chconf0;
};
static inline void mcspi_write_reg(struct spi_master *master,
		int idx, u32 val)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	writel_relaxed(val, mcspi->base + idx);
}

static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	return readl_relaxed(mcspi->base + idx);
}

static inline void mcspi_write_cs_reg(const struct spi_device *spi,
		int idx, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	writel_relaxed(val, cs->base + idx);
}

static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return readl_relaxed(cs->base + idx);
}

static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return cs->chconf0;
}

static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	cs->chconf0 = val;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}
static inline int mcspi_bytes_per_word(int word_len)
{
	if (word_len <= 8)
		return 1;
	else if (word_len <= 16)
		return 2;
	else /* word_len <= 32 */
		return 4;
}
static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
		int is_read, int enable)
{
	u32 l, rw;

	l = mcspi_cached_chconf0(spi);

	if (is_read) /* 1 is read, 0 write */
		rw = OMAP2_MCSPI_CHCONF_DMAR;
	else
		rw = OMAP2_MCSPI_CHCONF_DMAW;

	if (enable)
		l |= rw;
	else
		l &= ~rw;

	mcspi_write_chconf0(spi, l);
}
static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
	u32 l;

	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
	/* Flush posted writes */
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
	u32 l;

	l = mcspi_cached_chconf0(spi);
	if (cs_active)
		l |= OMAP2_MCSPI_CHCONF_FORCE;
	else
		l &= ~OMAP2_MCSPI_CHCONF_FORCE;

	mcspi_write_chconf0(spi, l);
}
static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	u32 l;

	/*
	 * Setup when switching from (reset default) slave mode
	 * to single-channel master mode
	 */
	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);

	ctx->modulctrl = l;
}
static void omap2_mcspi_set_fifo(const struct spi_device *spi,
				struct spi_transfer *t, int enable)
{
	struct spi_master *master = spi->master;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	unsigned int wcnt;
	int max_fifo_depth, fifo_depth, bytes_per_word;
	u32 chconf, xferlevel;

	mcspi = spi_master_get_devdata(master);

	chconf = mcspi_cached_chconf0(spi);
	if (enable) {
		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
		if (t->len % bytes_per_word != 0)
			goto disable_fifo;

		if (t->rx_buf != NULL && t->tx_buf != NULL)
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
		else
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;

		fifo_depth = gcd(t->len, max_fifo_depth);
		if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
			goto disable_fifo;

		wcnt = t->len / bytes_per_word;
		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
			goto disable_fifo;

		xferlevel = wcnt << 16;
		if (t->rx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFER;
			xferlevel |= (fifo_depth - 1) << 8;
		}
		if (t->tx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFET;
			xferlevel |= fifo_depth - 1;
		}

		mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
		mcspi_write_chconf0(spi, chconf);
		mcspi->fifo_depth = fifo_depth;

		return;
	}

disable_fifo:
	if (t->rx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
	else
		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;

	mcspi_write_chconf0(spi, chconf);
	mcspi->fifo_depth = 0;
}
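
/*
 * XFERLEVEL packing used above: the word count (wcnt) goes into the upper
 * half of the register (wcnt << 16), the RX almost-full level is written as
 * (fifo_depth - 1) << 8, and the TX almost-empty level as fifo_depth - 1 in
 * the low byte.
 */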
static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
	struct spi_master	*spi_cntrl = mcspi->master;
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	struct omap2_mcspi_cs	*cs;

	/* McSPI: context restore */
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);

	list_for_each_entry(cs, &ctx->cs, node)
		writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(1000);
	while (!(readl_relaxed(reg) & bit)) {
		if (time_after(jiffies, timeout)) {
			if (!(readl_relaxed(reg) & bit))
				return -ETIMEDOUT;
			else
				return 0;
		}
		cpu_relax();
	}
	return 0;
}
static void omap2_mcspi_rx_callback(void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	/* We must disable the DMA RX request */
	omap2_mcspi_set_dma_req(spi, 1, 0);

	complete(&mcspi_dma->dma_rx_completion);
}
static void omap2_mcspi_tx_callback(void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	/* We must disable the DMA TX request */
	omap2_mcspi_set_dma_req(spi, 0, 0);

	complete(&mcspi_dma->dma_tx_completion);
}
static void omap2_mcspi_tx_dma(struct spi_device *spi,
				struct spi_transfer *xfer,
				struct dma_slave_config cfg)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	if (mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->tx_dma;
		sg_dma_len(&sg) = xfer->len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}
	dma_async_issue_pending(mcspi_dma->dma_tx);
	omap2_mcspi_set_dma_req(spi, 0, 1);
}
static unsigned
omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
				struct dma_slave_config cfg,
				unsigned es)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;
	unsigned int		count, dma_count;
	u32			l;
	int			elements = 0;
	int			word_len, element_count;
	struct omap2_mcspi_cs	*cs = spi->controller_state;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;
	dma_count = xfer->len;

	if (mcspi->fifo_depth == 0)
		dma_count -= es;

	word_len = cs->word_len;
	l = mcspi_cached_chconf0(spi);

	if (word_len <= 8)
		element_count = count;
	else if (word_len <= 16)
		element_count = count >> 1;
	else /* word_len <= 32 */
		element_count = count >> 2;

	if (mcspi_dma->dma_rx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);

		if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
			dma_count -= es;

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->rx_dma;
		sg_dma_len(&sg) = dma_count;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
				DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_rx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}

	dma_async_issue_pending(mcspi_dma->dma_rx);
	omap2_mcspi_set_dma_req(spi, 1, 1);

	wait_for_completion(&mcspi_dma->dma_rx_completion);
	dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
			 DMA_FROM_DEVICE);

	if (mcspi->fifo_depth > 0)
		return count;

	omap2_mcspi_set_enable(spi, 0);

	elements = element_count - 1;

	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
		elements--;

		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
				   & OMAP2_MCSPI_CHSTAT_RXS)) {
			u32 w;

			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
			if (word_len <= 8)
				((u8 *)xfer->rx_buf)[elements++] = w;
			else if (word_len <= 16)
				((u16 *)xfer->rx_buf)[elements++] = w;
			else /* word_len <= 32 */
				((u32 *)xfer->rx_buf)[elements++] = w;
		} else {
			int bytes_per_word = mcspi_bytes_per_word(word_len);
			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
			count -= (bytes_per_word << 1);
			omap2_mcspi_set_enable(spi, 1);
			return count;
		}
	}
	if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
				& OMAP2_MCSPI_CHSTAT_RXS)) {
		u32 w;

		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
		if (word_len <= 8)
			((u8 *)xfer->rx_buf)[elements] = w;
		else if (word_len <= 16)
			((u16 *)xfer->rx_buf)[elements] = w;
		else /* word_len <= 32 */
			((u32 *)xfer->rx_buf)[elements] = w;
	} else {
		dev_err(&spi->dev, "DMA RX last word empty\n");
		count -= mcspi_bytes_per_word(word_len);
	}
	omap2_mcspi_set_enable(spi, 1);
	return count;
}
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	struct omap2_mcspi_dma	*mcspi_dma;
	unsigned int		count;
	u32			l;
	u8			*rx;
	const u8		*tx;
	struct dma_slave_config	cfg;
	enum dma_slave_buswidth width;
	unsigned		es;
	u32			burst;
	void __iomem		*chstat_reg;
	void __iomem		*irqstat_reg;
	int			wait_res;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	l = mcspi_cached_chconf0(spi);

	if (cs->word_len <= 8) {
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		es = 1;
	} else if (cs->word_len <= 16) {
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		es = 2;
	} else {
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		es = 4;
	}

	count = xfer->len;
	burst = 1;

	if (mcspi->fifo_depth > 0) {
		if (count > mcspi->fifo_depth)
			burst = mcspi->fifo_depth / es;
		else
			burst = count / es;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
	cfg.src_addr_width = width;
	cfg.dst_addr_width = width;
	cfg.src_maxburst = burst;
	cfg.dst_maxburst = burst;

	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	if (tx != NULL)
		omap2_mcspi_tx_dma(spi, xfer, cfg);

	if (rx != NULL)
		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);

	if (tx != NULL) {
		wait_for_completion(&mcspi_dma->dma_tx_completion);
		dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
				 DMA_TO_DEVICE);

		if (mcspi->fifo_depth > 0) {
			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;

			if (mcspi_wait_for_reg_bit(irqstat_reg,
						OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
				dev_err(&spi->dev, "EOW timed out\n");

			mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
					OMAP2_MCSPI_IRQSTATUS_EOW);
		}

		/* for TX_ONLY mode, be sure all words have shifted out */
		if (rx == NULL) {
			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
			if (mcspi->fifo_depth > 0) {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXFFE);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXFFE timed out\n");
			} else {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXS timed out\n");
			}
			if (wait_res >= 0 &&
				(mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_EOT) < 0))
				dev_err(&spi->dev, "EOT timed out\n");
		}
	}
	return count;
}
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	unsigned int		count, c;
	u32			l;
	void __iomem		*base = cs->base;
	void __iomem		*tx_reg;
	void __iomem		*rx_reg;
	void __iomem		*chstat_reg;
	int			word_len;

	mcspi = spi_master_get_devdata(spi->master);
	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	l = mcspi_cached_chconf0(spi);

	/* We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop. */
	tx_reg		= base + OMAP2_MCSPI_TX0;
	rx_reg		= base + OMAP2_MCSPI_RX0;
	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;

	if (c < (word_len >> 3))
		return 0;

	if (word_len <= 8) {
		u8		*rx;
		const u8	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 1;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %02x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 1 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %02x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
			}
		} while (c);
	} else if (word_len <= 16) {
		u16		*rx;
		const u16	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 2;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %04x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 2 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %04x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
			}
		} while (c >= 2);
	} else if (word_len <= 32) {
		u32		*rx;
		const u32	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 4;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %08x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 4 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %08x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
			}
		} while (c >= 4);
	}

	/* for TX_ONLY mode, be sure all words have shifted out */
	if (xfer->rx_buf == NULL) {
		if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
			dev_err(&spi->dev, "TXS timed out\n");
		} else if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");

		/* disable the channel to purge RX data received in a TX_ONLY
		 * transfer, otherwise that data would corrupt the directly
		 * following transfer done with PIO or DMA
		 */
		omap2_mcspi_set_enable(spi, 0);
	}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}
static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
{
	u32 div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
			return div;

	return 15;
}
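
/*
 * The divider above is a power of two: the channel clock is 48 MHz >> div.
 * For example, asking for 10 MHz returns div = 3, i.e. an actual SCK of
 * 48 MHz / 8 = 6 MHz, the highest rate not exceeding the request.
 */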
/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	struct spi_master *spi_cntrl;
	u32 l = 0, div = 0;
	u8 word_len = spi->bits_per_word;
	u32 speed_hz = spi->max_speed_hz;

	mcspi = spi_master_get_devdata(spi->master);
	spi_cntrl = mcspi->master;

	if (t != NULL && t->bits_per_word)
		word_len = t->bits_per_word;

	cs->word_len = word_len;

	if (t && t->speed_hz)
		speed_hz = t->speed_hz;

	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
	div = omap2_mcspi_calc_divisor(speed_hz);

	l = mcspi_cached_chconf0(spi);

	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
	 * REVISIT: this controller could support SPI_3WIRE mode.
	 */
	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
		l &= ~OMAP2_MCSPI_CHCONF_IS;
		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
		l |= OMAP2_MCSPI_CHCONF_DPE0;
	} else {
		l |= OMAP2_MCSPI_CHCONF_IS;
		l |= OMAP2_MCSPI_CHCONF_DPE1;
		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
	}

	/* wordlength */
	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
	l |= (word_len - 1) << 7;

	/* set chipselect polarity; manage with FORCE */
	if (!(spi->mode & SPI_CS_HIGH))
		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
	else
		l &= ~OMAP2_MCSPI_CHCONF_EPOL;

	/* set clock divisor */
	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
	l |= div << 2;

	/* set SPI mode 0..3 */
	if (spi->mode & SPI_CPOL)
		l |= OMAP2_MCSPI_CHCONF_POL;
	else
		l &= ~OMAP2_MCSPI_CHCONF_POL;
	if (spi->mode & SPI_CPHA)
		l |= OMAP2_MCSPI_CHCONF_PHA;
	else
		l &= ~OMAP2_MCSPI_CHCONF_PHA;

	mcspi_write_chconf0(spi, l);

	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
			OMAP2_MCSPI_MAX_FREQ >> div,
			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
			(spi->mode & SPI_CPOL) ? "inverted" : "normal");

	return 0;
}
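
/*
 * Summary of the CHCONF0 fields assembled above: PHA (bit 0) and POL (bit 1)
 * select the SPI mode, CLKD (bits 5..2) holds the divider from
 * omap2_mcspi_calc_divisor(), EPOL (bit 6) makes the chip select active-low,
 * and WL (bits 11..7) is programmed as word_len - 1.
 */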
/*
 * Note that we currently allow DMA only if we get a channel
 * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
 */
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
	struct spi_master	*master = spi->master;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;
	dma_cap_mask_t mask;
	unsigned sig;

	mcspi = spi_master_get_devdata(master);
	mcspi_dma = mcspi->dma_channels + spi->chip_select;

	init_completion(&mcspi_dma->dma_rx_completion);
	init_completion(&mcspi_dma->dma_tx_completion);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	sig = mcspi_dma->dma_rx_sync_dev;

	mcspi_dma->dma_rx =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &sig, &master->dev,
						 mcspi_dma->dma_rx_ch_name);
	if (!mcspi_dma->dma_rx)
		goto no_dma;

	sig = mcspi_dma->dma_tx_sync_dev;
	mcspi_dma->dma_tx =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &sig, &master->dev,
						 mcspi_dma->dma_tx_ch_name);

	if (!mcspi_dma->dma_tx) {
		dma_release_channel(mcspi_dma->dma_rx);
		mcspi_dma->dma_rx = NULL;
		goto no_dma;
	}

	return 0;

no_dma:
	dev_warn(&spi->dev, "not using DMA for McSPI\n");
	return -EAGAIN;
}
static int omap2_mcspi_setup(struct spi_device *spi)
{
	int			ret;
	struct omap2_mcspi	*mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	struct omap2_mcspi_dma	*mcspi_dma;
	struct omap2_mcspi_cs	*cs = spi->controller_state;

	mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		cs->base = mcspi->base + spi->chip_select * 0x14;
		cs->phys = mcspi->phys + spi->chip_select * 0x14;
		cs->chconf0 = 0;
		spi->controller_state = cs;
		/* Link this to context save list */
		list_add_tail(&cs->node, &ctx->cs);
	}

	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
		ret = omap2_mcspi_request_dma(spi);
		if (ret < 0 && ret != -EAGAIN)
			return ret;
	}

	ret = pm_runtime_get_sync(mcspi->dev);
	if (ret < 0)
		return ret;

	ret = omap2_mcspi_setup_transfer(spi, NULL);
	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);

	return ret;
}
static void omap2_mcspi_cleanup(struct spi_device *spi)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;
	struct omap2_mcspi_cs	*cs;

	mcspi = spi_master_get_devdata(spi->master);

	if (spi->controller_state) {
		/* Unlink controller state from context save list */
		cs = spi->controller_state;
		list_del(&cs->node);

		kfree(cs);
	}

	if (spi->chip_select < spi->master->num_chipselect) {
		mcspi_dma = &mcspi->dma_channels[spi->chip_select];

		if (mcspi_dma->dma_rx) {
			dma_release_channel(mcspi_dma->dma_rx);
			mcspi_dma->dma_rx = NULL;
		}
		if (mcspi_dma->dma_tx) {
			dma_release_channel(mcspi_dma->dma_tx);
			mcspi_dma->dma_tx = NULL;
		}
	}
}
static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
{

	/* We only enable one channel at a time -- the one whose message is
	 * at the head of the queue -- although this controller would gladly
	 * arbitrate among multiple channels.  This corresponds to "single
	 * channel" master mode.  As a side effect, we need to manage the
	 * chipselect with the FORCE bit ... CS != channel enable.
	 */

	struct spi_device		*spi;
	struct spi_transfer		*t = NULL;
	struct spi_master		*master;
	struct omap2_mcspi_dma		*mcspi_dma;
	int				cs_active = 0;
	struct omap2_mcspi_cs		*cs;
	struct omap2_mcspi_device_config *cd;
	int				par_override = 0;
	int				status = 0;
	u32				chconf;

	spi = m->spi;
	master = spi->master;
	mcspi_dma = mcspi->dma_channels + spi->chip_select;
	cs = spi->controller_state;
	cd = spi->controller_data;

	omap2_mcspi_set_enable(spi, 0);
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
			status = -EINVAL;
			break;
		}
		if (par_override || t->speed_hz || t->bits_per_word) {
			par_override = 1;
			status = omap2_mcspi_setup_transfer(spi, t);
			if (status < 0)
				break;
			if (!t->speed_hz && !t->bits_per_word)
				par_override = 0;
		}
		if (cd && cd->cs_per_word) {
			chconf = mcspi->ctx.modulctrl;
			chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
			mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
			mcspi->ctx.modulctrl =
				mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
		}

		if (!cs_active) {
			omap2_mcspi_force_cs(spi, 1);
			cs_active = 1;
		}

		chconf = mcspi_cached_chconf0(spi);
		chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
		chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;

		if (t->tx_buf == NULL)
			chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
		else if (t->rx_buf == NULL)
			chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;

		if (cd && cd->turbo_mode && t->tx_buf == NULL) {
			/* Turbo mode is for more than one word */
			if (t->len > ((cs->word_len + 7) >> 3))
				chconf |= OMAP2_MCSPI_CHCONF_TURBO;
		}

		mcspi_write_chconf0(spi, chconf);

		if (t->len) {
			unsigned	count;

			if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
			    (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
				omap2_mcspi_set_fifo(spi, t, 1);

			omap2_mcspi_set_enable(spi, 1);

			/* RX_ONLY mode needs dummy data in TX reg */
			if (t->tx_buf == NULL)
				writel_relaxed(0, cs->base
						+ OMAP2_MCSPI_TX0);

			if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
			    (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
				count = omap2_mcspi_txrx_dma(spi, t);
			else
				count = omap2_mcspi_txrx_pio(spi, t);
			m->actual_length += count;

			if (count != t->len) {
				status = -EIO;
				break;
			}
		}

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		/* ignore the "leave it on after last xfer" hint */
		if (t->cs_change) {
			omap2_mcspi_force_cs(spi, 0);
			cs_active = 0;
		}

		omap2_mcspi_set_enable(spi, 0);

		if (mcspi->fifo_depth > 0)
			omap2_mcspi_set_fifo(spi, t, 0);
	}
	/* Restore defaults if they were overridden */
	if (par_override) {
		par_override = 0;
		status = omap2_mcspi_setup_transfer(spi, NULL);
	}

	if (cs_active)
		omap2_mcspi_force_cs(spi, 0);

	if (cd && cd->cs_per_word) {
		chconf = mcspi->ctx.modulctrl;
		chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
		mcspi->ctx.modulctrl =
			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
	}

	omap2_mcspi_set_enable(spi, 0);

	if (mcspi->fifo_depth > 0 && t)
		omap2_mcspi_set_fifo(spi, t, 0);

	m->status = status;
}
static int omap2_mcspi_transfer_one_message(struct spi_master *master,
		struct spi_message *m)
{
	struct spi_device	*spi;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;
	struct spi_transfer	*t;

	spi = m->spi;
	mcspi = spi_master_get_devdata(master);
	mcspi_dma = mcspi->dma_channels + spi->chip_select;
	m->actual_length = 0;
	m->status = 0;

	/* reject invalid messages and transfers */
	if (list_empty(&m->transfers))
		return -EINVAL;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		const void	*tx_buf = t->tx_buf;
		void		*rx_buf = t->rx_buf;
		unsigned	len = t->len;

		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
				|| (len && !(rx_buf || tx_buf))) {
			dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
					t->speed_hz,
					len,
					tx_buf ? "tx" : "",
					rx_buf ? "rx" : "",
					t->bits_per_word);
			return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
			dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
					t->speed_hz,
					OMAP2_MCSPI_MAX_FREQ >> 15);
			return -EINVAL;
		}

		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
			continue;

		if (mcspi_dma->dma_tx && tx_buf != NULL) {
			t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
					len, DMA_TO_DEVICE);
			if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
						'T', len);
				return -EINVAL;
			}
		}
		if (mcspi_dma->dma_rx && rx_buf != NULL) {
			t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
						'R', len);
				if (tx_buf != NULL)
					dma_unmap_single(mcspi->dev, t->tx_dma,
							len, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
	}

	omap2_mcspi_work(mcspi, m);
	spi_finalize_current_message(master);
	return 0;
}
static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
	struct spi_master	*master = mcspi->master;
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	int			ret = 0;

	ret = pm_runtime_get_sync(mcspi->dev);
	if (ret < 0)
		return ret;

	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;

	omap2_mcspi_set_master_mode(master);
	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);
	return 0;
}
static int omap_mcspi_runtime_resume(struct device *dev)
{
	struct omap2_mcspi	*mcspi;
	struct spi_master	*master;

	master = dev_get_drvdata(dev);
	mcspi = spi_master_get_devdata(master);
	omap2_mcspi_restore_ctx(mcspi);

	return 0;
}
static struct omap2_mcspi_platform_config omap2_pdata = {
	.regs_offset = 0,
};

static struct omap2_mcspi_platform_config omap4_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};

static const struct of_device_id omap_mcspi_of_match[] = {
	{
		.compatible = "ti,omap2-mcspi",
		.data = &omap2_pdata,
	},
	{
		.compatible = "ti,omap4-mcspi",
		.data = &omap4_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
static int omap2_mcspi_probe(struct platform_device *pdev)
{
	struct spi_master	*master;
	const struct omap2_mcspi_platform_config *pdata;
	struct omap2_mcspi	*mcspi;
	struct resource		*r;
	int			status = 0, i;
	u32			regs_offset = 0;
	static int		bus_num = 1;
	struct device_node	*node = pdev->dev.of_node;
	const struct of_device_id *match;

	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
	if (master == NULL) {
		dev_dbg(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->setup = omap2_mcspi_setup;
	master->auto_runtime_pm = true;
	master->transfer_one_message = omap2_mcspi_transfer_one_message;
	master->cleanup = omap2_mcspi_cleanup;
	master->dev.of_node = node;

	platform_set_drvdata(pdev, master);

	mcspi = spi_master_get_devdata(master);
	mcspi->master = master;

	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
	if (match) {
		u32 num_cs = 1; /* default number of chipselect */
		pdata = match->data;

		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
		master->num_chipselect = num_cs;
		master->bus_num = bus_num++;
		if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
	} else {
		pdata = dev_get_platdata(&pdev->dev);
		master->num_chipselect = pdata->num_cs;
		if (pdev->id != -1)
			master->bus_num = pdev->id;
		mcspi->pin_dir = pdata->pin_dir;
	}
	regs_offset = pdata->regs_offset;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		status = -ENODEV;
		goto free_master;
	}

	r->start += regs_offset;
	r->end += regs_offset;
	mcspi->phys = r->start;

	mcspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(mcspi->base)) {
		status = PTR_ERR(mcspi->base);
		goto free_master;
	}

	mcspi->dev = &pdev->dev;

	INIT_LIST_HEAD(&mcspi->ctx.cs);

	mcspi->dma_channels = kcalloc(master->num_chipselect,
			sizeof(struct omap2_mcspi_dma),
			GFP_KERNEL);

	if (mcspi->dma_channels == NULL) {
		status = -ENOMEM;
		goto free_master;
	}

	for (i = 0; i < master->num_chipselect; i++) {
		char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
		char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
		struct resource *dma_res;

		sprintf(dma_rx_ch_name, "rx%d", i);
		if (!pdev->dev.of_node) {
			dma_res =
				platform_get_resource_byname(pdev,
							     IORESOURCE_DMA,
							     dma_rx_ch_name);
			if (!dma_res) {
				dev_dbg(&pdev->dev,
					"cannot get DMA RX channel\n");
				status = -ENODEV;
				break;
			}

			mcspi->dma_channels[i].dma_rx_sync_dev =
				dma_res->start;
		}
		sprintf(dma_tx_ch_name, "tx%d", i);
		if (!pdev->dev.of_node) {
			dma_res =
				platform_get_resource_byname(pdev,
							     IORESOURCE_DMA,
							     dma_tx_ch_name);
			if (!dma_res) {
				dev_dbg(&pdev->dev,
					"cannot get DMA TX channel\n");
				status = -ENODEV;
				break;
			}

			mcspi->dma_channels[i].dma_tx_sync_dev =
				dma_res->start;
		}
	}

	if (status < 0)
		goto dma_chnl_free;

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	status = omap2_mcspi_master_setup(mcspi);
	if (status < 0)
		goto disable_pm;

	status = devm_spi_register_master(&pdev->dev, master);
	if (status < 0)
		goto disable_pm;

	return status;

disable_pm:
	pm_runtime_disable(&pdev->dev);
dma_chnl_free:
	kfree(mcspi->dma_channels);
free_master:
	spi_master_put(master);
	return status;
}
static int omap2_mcspi_remove(struct platform_device *pdev)
{
	struct spi_master	*master;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*dma_channels;

	master = platform_get_drvdata(pdev);
	mcspi = spi_master_get_devdata(master);
	dma_channels = mcspi->dma_channels;

	pm_runtime_put_sync(mcspi->dev);
	pm_runtime_disable(&pdev->dev);

	kfree(dma_channels);

	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");
#ifdef CONFIG_SUSPEND
/*
 * When SPI wakes up from off-mode, CS is in the active state. If it was in
 * the inactive state when the driver was suspended, force it back to the
 * inactive state at wake up.
 */
static int omap2_mcspi_resume(struct device *dev)
{
	struct spi_master	*master = dev_get_drvdata(dev);
	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
	struct omap2_mcspi_regs	*ctx = &mcspi->ctx;
	struct omap2_mcspi_cs	*cs;

	pm_runtime_get_sync(mcspi->dev);
	list_for_each_entry(cs, &ctx->cs, node) {
		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
			/*
			 * We need to toggle the CS state for the OMAP to take
			 * this change into account.
			 */
			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}
	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);
	return 0;
}
#else
#define omap2_mcspi_resume	NULL
#endif
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
	.resume = omap2_mcspi_resume,
	.runtime_resume	= omap_mcspi_runtime_resume,
};

static struct platform_driver omap2_mcspi_driver = {
	.driver = {
		.name =		"omap2_mcspi",
		.owner =	THIS_MODULE,
		.pm =		&omap2_mcspi_pm_ops,
		.of_match_table = omap_mcspi_of_match,
	},
	.probe =	omap2_mcspi_probe,
	.remove =	omap2_mcspi_remove,
};

module_platform_driver(omap2_mcspi_driver);
MODULE_LICENSE("GPL");