/*
 * OMAP2 McSPI controller driver
 *
 * Copyright (C) 2005, 2006 Nokia Corporation
 * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
 *		Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/spi/spi.h>

#include <plat/dma.h>
#include <plat/clock.h>
#include <plat/mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ		48000000

/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */
#define OMAP2_MCSPI_MAX_CTRL		4

#define OMAP2_MCSPI_REVISION		0x00
#define OMAP2_MCSPI_SYSSTATUS		0x14
#define OMAP2_MCSPI_IRQSTATUS		0x18
#define OMAP2_MCSPI_IRQENABLE		0x1c
#define OMAP2_MCSPI_WAKEUPENABLE	0x20
#define OMAP2_MCSPI_SYST		0x24
#define OMAP2_MCSPI_MODULCTRL		0x28

/* per-channel banks, 0x14 bytes each, first is: */
#define OMAP2_MCSPI_CHCONF0		0x2c
#define OMAP2_MCSPI_CHSTAT0		0x30
#define OMAP2_MCSPI_CHCTRL0		0x34
#define OMAP2_MCSPI_TX0			0x38
#define OMAP2_MCSPI_RX0			0x3c
/* per-register bitmasks: */

#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)

#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)

#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)

#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)

#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
	int dma_tx_channel;
	int dma_rx_channel;

	int dma_tx_sync_dev;
	int dma_rx_sync_dev;

	struct completion dma_tx_completion;
	struct completion dma_rx_completion;
};

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES			160

struct omap2_mcspi {
	struct work_struct	work;
	/* lock protects queue and registers */
	spinlock_t		lock;
	struct list_head	msg_queue;
	struct spi_master	*master;
	/* Virtual base address of the controller */
	void __iomem		*base;
	unsigned long		phys;
	/* SPI1 has 4 channels, while SPI2 has 2 */
	struct omap2_mcspi_dma	*dma_channels;
	struct device		*dev;
};

struct omap2_mcspi_cs {
	void __iomem		*base;
	unsigned long		phys;
	int			word_len;
	struct list_head	node;
	/* Context save and restore shadow register */
	u32			chconf0;
};

/* used for context save and restore, structure members to be updated whenever
 * corresponding registers are modified.
 */
struct omap2_mcspi_regs {
	u32 modulctrl;
	u32 wakeupenable;
	struct list_head cs;
};

static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];

static struct workqueue_struct *omap2_mcspi_wq;
#define MOD_REG_BIT(val, mask, set) do {	\
	if (set)				\
		val |= mask;			\
	else					\
		val &= ~mask;			\
} while (0)
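
/*
 * Register access helpers.  MODULCTRL and WAKEUPENABLE are per-controller
 * registers reached through mcspi->base, while CHCONF0/CHSTAT0/CHCTRL0/TX0/RX0
 * are per-chip-select registers reached through the channel base cached in
 * omap2_mcspi_cs (mcspi->base + chip_select * 0x14).
 */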
static inline void mcspi_write_reg(struct spi_master *master,
		int idx, u32 val)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	__raw_writel(val, mcspi->base + idx);
}

static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	return __raw_readl(mcspi->base + idx);
}

static inline void mcspi_write_cs_reg(const struct spi_device *spi,
		int idx, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	__raw_writel(val, cs->base + idx);
}

static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return __raw_readl(cs->base + idx);
}

static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return cs->chconf0;
}

static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	cs->chconf0 = val;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}
static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
		int is_read, int enable)
{
	u32 l, rw;

	l = mcspi_cached_chconf0(spi);

	if (is_read) /* 1 is read, 0 write */
		rw = OMAP2_MCSPI_CHCONF_DMAR;
	else
		rw = OMAP2_MCSPI_CHCONF_DMAW;

	MOD_REG_BIT(l, rw, enable);
	mcspi_write_chconf0(spi, l);
}

static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
	u32 l;

	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
	/* Flush posted writes */
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
	u32 l;

	l = mcspi_cached_chconf0(spi);
	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
	mcspi_write_chconf0(spi, l);
}
static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
	u32 l;

	/* setup when switching from (reset default) slave mode
	 * to single-channel master mode
	 */
	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);

	omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
}
static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
	struct spi_master	*spi_cntrl;
	struct omap2_mcspi_cs	*cs;

	spi_cntrl = mcspi->master;

	/* McSPI: context restore */
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);

	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);

	list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
			node)
		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
	pm_runtime_put_sync(mcspi->dev);
}

static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
{
	return pm_runtime_get_sync(mcspi->dev);
}
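
/*
 * Poll a channel status register for a bit (TXS, RXS or EOT), giving up
 * after one second so a stuck FIFO cannot hang the calling context.
 */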
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(1000);
	while (!(__raw_readl(reg) & bit)) {
		if (time_after(jiffies, timeout))
			return -1;
		cpu_relax();
	}
	return 0;
}
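
/*
 * DMA transfer path.  The buffers were already DMA-mapped by
 * omap2_mcspi_transfer(); here the OMAP system-DMA channels are programmed,
 * the completions are awaited, and the last RX word (two words when TURBO is
 * enabled) is read back by PIO with the channel disabled.
 */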
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	struct omap2_mcspi_dma	*mcspi_dma;
	unsigned int		count, c;
	unsigned long		base, tx_reg, rx_reg;
	int			word_len, data_type, element_count;
	int			elements = 0;
	u32			l;
	u8			*rx;
	const u8		*tx;
	void __iomem		*chstat_reg;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	l = mcspi_cached_chconf0(spi);

	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	base = cs->phys;
	tx_reg = base + OMAP2_MCSPI_TX0;
	rx_reg = base + OMAP2_MCSPI_RX0;
	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	if (word_len <= 8) {
		data_type = OMAP_DMA_DATA_TYPE_S8;
		element_count = count;
	} else if (word_len <= 16) {
		data_type = OMAP_DMA_DATA_TYPE_S16;
		element_count = count >> 1;
	} else /* word_len <= 32 */ {
		data_type = OMAP_DMA_DATA_TYPE_S32;
		element_count = count >> 2;
	}

	if (tx != NULL) {
		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
				data_type, element_count, 1,
				OMAP_DMA_SYNC_ELEMENT,
				mcspi_dma->dma_tx_sync_dev, 0);

		omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				tx_reg, 0, 0);

		omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				xfer->tx_dma, 0, 0);
	}

	if (rx != NULL) {
		elements = element_count - 1;
		if (l & OMAP2_MCSPI_CHCONF_TURBO)
			elements--;

		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
				data_type, elements, 1,
				OMAP_DMA_SYNC_ELEMENT,
				mcspi_dma->dma_rx_sync_dev, 1);

		omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				rx_reg, 0, 0);

		omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				xfer->rx_dma, 0, 0);
	}

	if (tx != NULL) {
		omap_start_dma(mcspi_dma->dma_tx_channel);
		omap2_mcspi_set_dma_req(spi, 0, 1);
	}

	if (rx != NULL) {
		omap_start_dma(mcspi_dma->dma_rx_channel);
		omap2_mcspi_set_dma_req(spi, 1, 1);
	}

	if (tx != NULL) {
		wait_for_completion(&mcspi_dma->dma_tx_completion);
		dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE);

		/* for TX_ONLY mode, be sure all words have shifted out */
		if (mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_TXS) < 0)
			dev_err(&spi->dev, "TXS timed out\n");
		else if (mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");
	}

	if (rx != NULL) {
		wait_for_completion(&mcspi_dma->dma_rx_completion);
		dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE);
		omap2_mcspi_set_enable(spi, 0);

		if (l & OMAP2_MCSPI_CHCONF_TURBO) {

			if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
					& OMAP2_MCSPI_CHSTAT_RXS)) {
				u32 w;

				w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
				if (word_len <= 8)
					((u8 *)xfer->rx_buf)[elements++] = w;
				else if (word_len <= 16)
					((u16 *)xfer->rx_buf)[elements++] = w;
				else /* word_len <= 32 */
					((u32 *)xfer->rx_buf)[elements++] = w;
			} else {
				dev_err(&spi->dev,
					"DMA RX penultimate word empty");
				count -= (word_len <= 8)  ? 2 :
					(word_len <= 16) ? 4 :
					/* word_len <= 32 */ 8;
				omap2_mcspi_set_enable(spi, 1);
				return count;
			}
		}

		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
				& OMAP2_MCSPI_CHSTAT_RXS)) {
			u32 w;

			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
			if (word_len <= 8)
				((u8 *)xfer->rx_buf)[elements] = w;
			else if (word_len <= 16)
				((u16 *)xfer->rx_buf)[elements] = w;
			else /* word_len <= 32 */
				((u32 *)xfer->rx_buf)[elements] = w;
		} else {
			dev_err(&spi->dev, "DMA RX last word empty");
			count -= (word_len <= 8)  ? 1 :
				 (word_len <= 16) ? 2 :
				 /* word_len <= 32 */ 4;
		}
		omap2_mcspi_set_enable(spi, 1);
	}
	return count;
}
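
/*
 * PIO transfer path, used for transfers shorter than DMA_MIN_BYTES: every
 * word is moved through the TX0/RX0 registers after polling the TXS/RXS
 * status bits, with one branch per 8/16/32-bit word length.
 */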
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	unsigned int		count, c;
	u32			l;
	void __iomem		*base = cs->base;
	void __iomem		*tx_reg;
	void __iomem		*rx_reg;
	void __iomem		*chstat_reg;
	int			word_len;

	mcspi = spi_master_get_devdata(spi->master);
	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	l = mcspi_cached_chconf0(spi);

	/* We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop. */
	tx_reg		= base + OMAP2_MCSPI_TX0;
	rx_reg		= base + OMAP2_MCSPI_RX0;
	chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;

	if (c < (word_len >> 3))
		return 0;

	if (word_len <= 8) {
		u8		*rx;
		const u8	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 1;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %02x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 1 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %02x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
			}
		} while (c);
	} else if (word_len <= 16) {
		u16		*rx;
		const u16	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 2;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %04x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 2 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %04x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
			}
		} while (c);
	} else if (word_len <= 32) {
		u32		*rx;
		const u32	*tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 4;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %08x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 4 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %08x\n",
						    word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
			}
		} while (c);
	}

	/* for TX_ONLY mode, be sure all words have shifted out */
	if (xfer->rx_buf == NULL) {
		if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
			dev_err(&spi->dev, "TXS timed out\n");
		} else if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");

		/* disable chan to purge rx datas received in TX_ONLY transfer,
		 * otherwise these rx datas will affect the direct following
		 * spi_transfer
		 */
		omap2_mcspi_set_enable(spi, 0);
	}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}
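
/* The functional clock is divided by a power of two; pick the smallest
 * divider that does not produce a rate above the requested speed. */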
static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
{
	u32 div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
			return div;

	return 15;
}
/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	struct omap2_mcspi	*mcspi;
	struct spi_master	*spi_cntrl;
	u32			l = 0, div = 0;
	u8			word_len = spi->bits_per_word;
	u32			speed_hz = spi->max_speed_hz;

	mcspi = spi_master_get_devdata(spi->master);
	spi_cntrl = mcspi->master;

	if (t != NULL && t->bits_per_word)
		word_len = t->bits_per_word;

	cs->word_len = word_len;

	if (t && t->speed_hz)
		speed_hz = t->speed_hz;

	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
	div = omap2_mcspi_calc_divisor(speed_hz);

	l = mcspi_cached_chconf0(spi);

	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
	 * REVISIT: this controller could support SPI_3WIRE mode.
	 */
	l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
	l |= OMAP2_MCSPI_CHCONF_DPE0;

	/* wordlength */
	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
	l |= (word_len - 1) << 7;

	/* set chipselect polarity; manage with FORCE */
	if (!(spi->mode & SPI_CS_HIGH))
		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
	else
		l &= ~OMAP2_MCSPI_CHCONF_EPOL;

	/* set clock divisor */
	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
	l |= div << 2;

	/* set SPI mode 0..3 */
	if (spi->mode & SPI_CPOL)
		l |= OMAP2_MCSPI_CHCONF_POL;
	else
		l &= ~OMAP2_MCSPI_CHCONF_POL;
	if (spi->mode & SPI_CPHA)
		l |= OMAP2_MCSPI_CHCONF_PHA;
	else
		l &= ~OMAP2_MCSPI_CHCONF_PHA;

	mcspi_write_chconf0(spi, l);

	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
			OMAP2_MCSPI_MAX_FREQ >> div,
			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
			(spi->mode & SPI_CPOL) ? "inverted" : "normal");

	return 0;
}
static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device	*spi = data;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

	complete(&mcspi_dma->dma_rx_completion);

	/* We must disable the DMA RX request */
	omap2_mcspi_set_dma_req(spi, 1, 0);
}

static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device	*spi = data;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

	complete(&mcspi_dma->dma_tx_completion);

	/* We must disable the DMA TX request */
	omap2_mcspi_set_dma_req(spi, 0, 0);
}
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
	struct spi_master	*master = spi->master;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;

	mcspi = spi_master_get_devdata(master);
	mcspi_dma = mcspi->dma_channels + spi->chip_select;

	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
			omap2_mcspi_dma_rx_callback, spi,
			&mcspi_dma->dma_rx_channel)) {
		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
		return -EAGAIN;
	}

	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
			omap2_mcspi_dma_tx_callback, spi,
			&mcspi_dma->dma_tx_channel)) {
		omap_free_dma(mcspi_dma->dma_rx_channel);
		mcspi_dma->dma_rx_channel = -1;
		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
		return -EAGAIN;
	}

	init_completion(&mcspi_dma->dma_rx_completion);
	init_completion(&mcspi_dma->dma_tx_completion);

	return 0;
}
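
/*
 * spi_master setup() hook: allocate the per-chip-select state on first use,
 * link it into the context-restore list, request the DMA channels lazily and
 * program the channel with the device defaults.
 */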
static int omap2_mcspi_setup(struct spi_device *spi)
{
	int			ret;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;
	struct omap2_mcspi_cs	*cs = spi->controller_state;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		cs->base = mcspi->base + spi->chip_select * 0x14;
		cs->phys = mcspi->phys + spi->chip_select * 0x14;
		cs->chconf0 = 0;
		spi->controller_state = cs;
		/* Link this to context save list */
		list_add_tail(&cs->node,
			&omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs);
	}

	if (mcspi_dma->dma_rx_channel == -1
			|| mcspi_dma->dma_tx_channel == -1) {
		ret = omap2_mcspi_request_dma(spi);
		if (ret < 0)
			return ret;
	}

	ret = omap2_mcspi_enable_clocks(mcspi);
	if (ret < 0)
		return ret;

	ret = omap2_mcspi_setup_transfer(spi, NULL);
	omap2_mcspi_disable_clocks(mcspi);

	return ret;
}
static void omap2_mcspi_cleanup(struct spi_device *spi)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;
	struct omap2_mcspi_cs	*cs;

	mcspi = spi_master_get_devdata(spi->master);

	if (spi->controller_state) {
		/* Unlink controller state from context save list */
		cs = spi->controller_state;
		list_del(&cs->node);

		kfree(spi->controller_state);
	}

	if (spi->chip_select < spi->master->num_chipselect) {
		mcspi_dma = &mcspi->dma_channels[spi->chip_select];

		if (mcspi_dma->dma_rx_channel != -1) {
			omap_free_dma(mcspi_dma->dma_rx_channel);
			mcspi_dma->dma_rx_channel = -1;
		}
		if (mcspi_dma->dma_tx_channel != -1) {
			omap_free_dma(mcspi_dma->dma_tx_channel);
			mcspi_dma->dma_tx_channel = -1;
		}
	}
}
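
/*
 * Workqueue handler: drain mcspi->msg_queue with the controller clocks
 * enabled, processing each queued spi_message in FIFO order.
 */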
static void omap2_mcspi_work(struct work_struct *work)
{
	struct omap2_mcspi	*mcspi;

	mcspi = container_of(work, struct omap2_mcspi, work);

	if (omap2_mcspi_enable_clocks(mcspi) < 0)
		return;

	spin_lock_irq(&mcspi->lock);

	/* We only enable one channel at a time -- the one whose message is
	 * at the head of the queue -- although this controller would gladly
	 * arbitrate among multiple channels.  This corresponds to "single
	 * channel" master mode.  As a side effect, we need to manage the
	 * chipselect with the FORCE bit ... CS != channel enable.
	 */
	while (!list_empty(&mcspi->msg_queue)) {
		struct spi_message		*m;
		struct spi_device		*spi;
		struct spi_transfer		*t = NULL;
		int				cs_active = 0;
		struct omap2_mcspi_cs		*cs;
		struct omap2_mcspi_device_config *cd;
		int				par_override = 0;
		int				status = 0;
		u32				chconf;

		m = container_of(mcspi->msg_queue.next, struct spi_message,
				 queue);

		list_del_init(&m->queue);
		spin_unlock_irq(&mcspi->lock);

		spi = m->spi;
		cs = spi->controller_state;
		cd = spi->controller_data;

		omap2_mcspi_set_enable(spi, 1);
		list_for_each_entry(t, &m->transfers, transfer_list) {
			if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
				status = -EINVAL;
				break;
			}
			if (par_override || t->speed_hz || t->bits_per_word) {
				par_override = 1;
				status = omap2_mcspi_setup_transfer(spi, t);
				if (status < 0)
					break;
				if (!t->speed_hz && !t->bits_per_word)
					par_override = 0;
			}

			if (!cs_active) {
				omap2_mcspi_force_cs(spi, 1);
				cs_active = 1;
			}

			chconf = mcspi_cached_chconf0(spi);
			chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
			chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;

			if (t->tx_buf == NULL)
				chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
			else if (t->rx_buf == NULL)
				chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;

			if (cd && cd->turbo_mode && t->tx_buf == NULL) {
				/* Turbo mode is for more than one word */
				if (t->len > ((cs->word_len + 7) >> 3))
					chconf |= OMAP2_MCSPI_CHCONF_TURBO;
			}

			mcspi_write_chconf0(spi, chconf);

			if (t->len) {
				unsigned	count;

				/* RX_ONLY mode needs dummy data in TX reg */
				if (t->tx_buf == NULL)
					__raw_writel(0, cs->base
							+ OMAP2_MCSPI_TX0);

				if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
					count = omap2_mcspi_txrx_dma(spi, t);
				else
					count = omap2_mcspi_txrx_pio(spi, t);
				m->actual_length += count;

				if (count != t->len) {
					status = -EIO;
					break;
				}
			}

			if (t->delay_usecs)
				udelay(t->delay_usecs);

			/* ignore the "leave it on after last xfer" hint */
			if (t->cs_change) {
				omap2_mcspi_force_cs(spi, 0);
				cs_active = 0;
			}
		}

		/* Restore defaults if they were overridden */
		if (par_override) {
			par_override = 0;
			status = omap2_mcspi_setup_transfer(spi, NULL);
		}

		if (cs_active)
			omap2_mcspi_force_cs(spi, 0);

		omap2_mcspi_set_enable(spi, 0);

		m->status = status;
		m->complete(m->context);

		spin_lock_irq(&mcspi->lock);
	}

	spin_unlock_irq(&mcspi->lock);

	omap2_mcspi_disable_clocks(mcspi);
}
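
/*
 * spi_master transfer() hook: validate the message, DMA-map the buffers of
 * transfers that will use the DMA path, then queue the message and kick the
 * workqueue.  May be called from any context, hence the irqsave locking.
 */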
static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct omap2_mcspi	*mcspi;
	unsigned long		flags;
	struct spi_transfer	*t;

	m->actual_length = 0;
	m->status = 0;

	/* reject invalid messages and transfers */
	if (list_empty(&m->transfers) || !m->complete)
		return -EINVAL;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		const void	*tx_buf = t->tx_buf;
		void		*rx_buf = t->rx_buf;
		unsigned	len = t->len;

		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
				|| (len && !(rx_buf || tx_buf))
				|| (t->bits_per_word &&
					(  t->bits_per_word < 4
					|| t->bits_per_word > 32))) {
			dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
					t->speed_hz,
					len,
					tx_buf ? "tx" : "",
					rx_buf ? "rx" : "",
					t->bits_per_word);
			return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
			dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n",
					t->speed_hz,
					OMAP2_MCSPI_MAX_FREQ >> 15);
			return -EINVAL;
		}

		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
			continue;

		if (tx_buf != NULL) {
			t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
					len, DMA_TO_DEVICE);
			if (dma_mapping_error(&spi->dev, t->tx_dma)) {
				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
						'T', len);
				return -EINVAL;
			}
		}
		if (rx_buf != NULL) {
			t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(&spi->dev, t->rx_dma)) {
				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
						'R', len);
				if (tx_buf != NULL)
					dma_unmap_single(&spi->dev, t->tx_dma,
							len, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
	}

	mcspi = spi_master_get_devdata(spi->master);

	spin_lock_irqsave(&mcspi->lock, flags);
	list_add_tail(&m->queue, &mcspi->msg_queue);
	queue_work(omap2_mcspi_wq, &mcspi->work);
	spin_unlock_irqrestore(&mcspi->lock, flags);

	return 0;
}
static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
	struct spi_master	*master = mcspi->master;
	u32			tmp;
	int			ret = 0;

	ret = omap2_mcspi_enable_clocks(mcspi);
	if (ret < 0)
		return ret;

	tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
	omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp;

	omap2_mcspi_set_master_mode(master);
	omap2_mcspi_disable_clocks(mcspi);
	return 0;
}
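
/*
 * Runtime-PM resume: the power domain may have been off, so restore
 * MODULCTRL, WAKEUPENABLE and the per-CS CHCONF0 values from the
 * omap2_mcspi_ctx shadow copies.
 */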
static int omap_mcspi_runtime_resume(struct device *dev)
{
	struct omap2_mcspi	*mcspi;
	struct spi_master	*master;

	master = dev_get_drvdata(dev);
	mcspi = spi_master_get_devdata(master);
	omap2_mcspi_restore_ctx(mcspi);

	return 0;
}
static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
	struct spi_master	*master;
	struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data;
	struct omap2_mcspi	*mcspi;
	struct resource		*r;
	int			status = 0, i;

	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
	if (master == NULL) {
		dev_dbg(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->bus_num = pdev->id;

	master->setup = omap2_mcspi_setup;
	master->transfer = omap2_mcspi_transfer;
	master->cleanup = omap2_mcspi_cleanup;
	master->num_chipselect = pdata->num_cs;

	dev_set_drvdata(&pdev->dev, master);

	mcspi = spi_master_get_devdata(master);
	mcspi->master = master;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		status = -ENODEV;
		goto err1;
	}

	r->start += pdata->regs_offset;
	r->end += pdata->regs_offset;
	mcspi->phys = r->start;
	if (!request_mem_region(r->start, resource_size(r),
				dev_name(&pdev->dev))) {
		status = -EBUSY;
		goto err1;
	}

	mcspi->base = ioremap(r->start, resource_size(r));
	if (!mcspi->base) {
		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
		status = -ENOMEM;
		goto err2;
	}

	mcspi->dev = &pdev->dev;
	INIT_WORK(&mcspi->work, omap2_mcspi_work);

	spin_lock_init(&mcspi->lock);
	INIT_LIST_HEAD(&mcspi->msg_queue);
	INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);

	mcspi->dma_channels = kcalloc(master->num_chipselect,
			sizeof(struct omap2_mcspi_dma),
			GFP_KERNEL);

	if (mcspi->dma_channels == NULL) {
		status = -ENOMEM;
		goto err2;
	}

	for (i = 0; i < master->num_chipselect; i++) {
		char dma_ch_name[14];
		struct resource *dma_res;

		sprintf(dma_ch_name, "rx%d", i);
		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
							dma_ch_name);
		if (!dma_res) {
			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
			status = -ENODEV;
			break;
		}

		mcspi->dma_channels[i].dma_rx_channel = -1;
		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
		sprintf(dma_ch_name, "tx%d", i);
		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
							dma_ch_name);
		if (!dma_res) {
			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
			status = -ENODEV;
			break;
		}

		mcspi->dma_channels[i].dma_tx_channel = -1;
		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
	}

	pm_runtime_enable(&pdev->dev);

	if (status || omap2_mcspi_master_setup(mcspi) < 0)
		goto err4;

	status = spi_register_master(master);
	if (status < 0)
		goto err4;

	return status;

err4:
	spi_master_put(master);
err3:
	kfree(mcspi->dma_channels);
err2:
	release_mem_region(r->start, resource_size(r));
	iounmap(mcspi->base);
err1:
	return status;
}
static int __exit omap2_mcspi_remove(struct platform_device *pdev)
{
	struct spi_master	*master;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*dma_channels;
	struct resource		*r;
	void __iomem		*base;

	master = dev_get_drvdata(&pdev->dev);
	mcspi = spi_master_get_devdata(master);
	dma_channels = mcspi->dma_channels;

	omap2_mcspi_disable_clocks(mcspi);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	base = mcspi->base;
	spi_unregister_master(master);
	iounmap(base);
	kfree(dma_channels);

	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");
#ifdef	CONFIG_SUSPEND
/*
 * When the SPI module wakes up from off-mode, CS is in the active state.
 * If it was inactive when the driver was suspended, force it back to the
 * inactive state at wake up.
 */
static int omap2_mcspi_resume(struct device *dev)
{
	struct spi_master	*master = dev_get_drvdata(dev);
	struct omap2_mcspi	*mcspi = spi_master_get_devdata(master);
	struct omap2_mcspi_cs	*cs;

	omap2_mcspi_enable_clocks(mcspi);
	list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs,
			    node) {
		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {

			/*
			 * We need to toggle the CS state so that the OMAP
			 * takes this change into account.
			 */
			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1);
			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0);
			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}
	omap2_mcspi_disable_clocks(mcspi);
	return 0;
}
#else
#define	omap2_mcspi_resume	NULL
#endif
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
	.resume = omap2_mcspi_resume,
	.runtime_resume	= omap_mcspi_runtime_resume,
};

static struct platform_driver omap2_mcspi_driver = {
	.driver = {
		.name =		"omap2_mcspi",
		.owner =	THIS_MODULE,
		.pm =		&omap2_mcspi_pm_ops
	},
	.remove =	__exit_p(omap2_mcspi_remove),
};
static int __init omap2_mcspi_init(void)
{
	omap2_mcspi_wq = create_singlethread_workqueue(
				omap2_mcspi_driver.driver.name);
	if (omap2_mcspi_wq == NULL)
		return -1;
	return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
}
subsys_initcall(omap2_mcspi_init);

static void __exit omap2_mcspi_exit(void)
{
	platform_driver_unregister(&omap2_mcspi_driver);

	destroy_workqueue(omap2_mcspi_wq);
}
module_exit(omap2_mcspi_exit);

MODULE_LICENSE("GPL");