/*
 * OMAP2 McSPI controller driver
 *
 * Copyright (C) 2005, 2006 Nokia Corporation
 * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
 *		Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>

#include <linux/spi/spi.h>

#include <mach/dma.h>
#include <mach/clock.h>
#define OMAP2_MCSPI_MAX_FREQ		48000000

/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */
#define OMAP2_MCSPI_MAX_CTRL		4

#define OMAP2_MCSPI_REVISION		0x00
#define OMAP2_MCSPI_SYSCONFIG		0x10
#define OMAP2_MCSPI_SYSSTATUS		0x14
#define OMAP2_MCSPI_IRQSTATUS		0x18
#define OMAP2_MCSPI_IRQENABLE		0x1c
#define OMAP2_MCSPI_WAKEUPENABLE	0x20
#define OMAP2_MCSPI_SYST		0x24
#define OMAP2_MCSPI_MODULCTRL		0x28

/* per-channel banks, 0x14 bytes each, first is: */
#define OMAP2_MCSPI_CHCONF0		0x2c
#define OMAP2_MCSPI_CHSTAT0		0x30
#define OMAP2_MCSPI_CHCTRL0		0x34
#define OMAP2_MCSPI_TX0			0x38
#define OMAP2_MCSPI_RX0			0x3c

/* per-register bitmasks: */
#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE	BIT(4)
#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP	BIT(2)
#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE	BIT(0)
#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET	BIT(1)

#define OMAP2_MCSPI_SYSSTATUS_RESETDONE	BIT(0)

#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)

#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)

#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)

#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)

#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
        int dma_tx_channel;
        int dma_rx_channel;

        int dma_tx_sync_dev;
        int dma_rx_sync_dev;

        struct completion dma_tx_completion;
        struct completion dma_rx_completion;
};

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES			8

struct omap2_mcspi {
        struct work_struct	work;
        /* lock protects queue and registers */
        spinlock_t		lock;
        struct list_head	msg_queue;
        struct spi_master	*master;
        struct clk		*ick;
        struct clk		*fck;
        /* Virtual base address of the controller */
        void __iomem		*base;
        unsigned long		phys;
        /* SPI1 has 4 channels, while SPI2 has 2 */
        struct omap2_mcspi_dma	*dma_channels;
};

struct omap2_mcspi_cs {
        void __iomem		*base;
        unsigned long		phys;
        int			word_len;
        struct list_head	node;
        /* Context save and restore shadow register */
        u32			chconf0;
};

/* used for context save and restore, structure members to be updated whenever
 * corresponding registers are modified.
 */
struct omap2_mcspi_regs {
        u32 modulctrl;
        u32 wakeupenable;
        u32 sysconfig;
        struct list_head cs;
};

static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];

static struct workqueue_struct *omap2_mcspi_wq;
#define MOD_REG_BIT(val, mask, set) do { \
        if (set) \
                val |= mask; \
        else \
                val &= ~mask; \
} while (0)
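
/*
 * Register access helpers. Controller-wide registers are reached through
 * mcspi->base, per-channel registers through the chip select's cs->base;
 * CHCONF0 writes also refresh the cs->chconf0 shadow used for context
 * restore.
 */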
static inline void mcspi_write_reg(struct spi_master *master,
                int idx, u32 val)
{
        struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

        __raw_writel(val, mcspi->base + idx);
}
static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
        struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

        return __raw_readl(mcspi->base + idx);
}
static inline void mcspi_write_cs_reg(const struct spi_device *spi,
                int idx, u32 val)
{
        struct omap2_mcspi_cs *cs = spi->controller_state;

        __raw_writel(val, cs->base + idx);
}
static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
        struct omap2_mcspi_cs *cs = spi->controller_state;

        return __raw_readl(cs->base + idx);
}
static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
{
        struct omap2_mcspi_cs *cs = spi->controller_state;

        return cs->chconf0;
}
static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
        struct omap2_mcspi_cs *cs = spi->controller_state;

        cs->chconf0 = val;
        mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
}
static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
                int is_read, int enable)
{
        u32 l, rw;

        l = mcspi_cached_chconf0(spi);

        if (is_read) /* 1 is read, 0 write */
                rw = OMAP2_MCSPI_CHCONF_DMAR;
        else
                rw = OMAP2_MCSPI_CHCONF_DMAW;

        MOD_REG_BIT(l, rw, enable);
        mcspi_write_chconf0(spi, l);
}
static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
        u32 l;

        l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
        mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
}
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
        u32 l;

        l = mcspi_cached_chconf0(spi);
        MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
        mcspi_write_chconf0(spi, l);
}
static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
        u32 l;

        /* setup when switching from (reset default) slave mode
         * to single-channel master mode
         */
        l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
        MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
        MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
        MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
        mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);

        omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
}
static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
        struct spi_master	*spi_cntrl;
        struct omap2_mcspi_cs	*cs;
        spi_cntrl = mcspi->master;

        /* McSPI: context restore */
        mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
                        omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);

        mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG,
                        omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig);

        mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
                        omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);

        list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
                        node)
                __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
        clk_disable(mcspi->ick);
        clk_disable(mcspi->fck);
}
static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
{
        if (clk_enable(mcspi->ick))
                return -ENODEV;
        if (clk_enable(mcspi->fck))
                return -ENODEV;

        omap2_mcspi_restore_ctx(mcspi);

        return 0;
}
static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
        struct omap2_mcspi	*mcspi;
        struct omap2_mcspi_cs	*cs = spi->controller_state;
        struct omap2_mcspi_dma	*mcspi_dma;
        unsigned int		count, c;
        unsigned long		base, tx_reg, rx_reg;
        int			word_len, data_type, element_count;
        u8			*rx;
        const u8		*tx;

        mcspi = spi_master_get_devdata(spi->master);
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];

        count = xfer->len;
        c = count;
        word_len = cs->word_len;

        base = cs->phys;
        tx_reg = base + OMAP2_MCSPI_TX0;
        rx_reg = base + OMAP2_MCSPI_RX0;
        rx = xfer->rx_buf;
        tx = xfer->tx_buf;

        if (word_len <= 8) {
                data_type = OMAP_DMA_DATA_TYPE_S8;
                element_count = count;
        } else if (word_len <= 16) {
                data_type = OMAP_DMA_DATA_TYPE_S16;
                element_count = count >> 1;
        } else /* word_len <= 32 */ {
                data_type = OMAP_DMA_DATA_TYPE_S32;
                element_count = count >> 2;
        }

        if (tx != NULL) {
                omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
                                data_type, element_count, 1,
                                OMAP_DMA_SYNC_ELEMENT,
                                mcspi_dma->dma_tx_sync_dev, 0);

                omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
                                OMAP_DMA_AMODE_CONSTANT,
                                tx_reg, 0, 0);

                omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
                                OMAP_DMA_AMODE_POST_INC,
                                xfer->tx_dma, 0, 0);
        }

        if (rx != NULL) {
                omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
                                data_type, element_count - 1, 1,
                                OMAP_DMA_SYNC_ELEMENT,
                                mcspi_dma->dma_rx_sync_dev, 1);

                omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
                                OMAP_DMA_AMODE_CONSTANT,
                                rx_reg, 0, 0);

                omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
                                OMAP_DMA_AMODE_POST_INC,
                                xfer->rx_dma, 0, 0);
        }

        if (tx != NULL) {
                omap_start_dma(mcspi_dma->dma_tx_channel);
                omap2_mcspi_set_dma_req(spi, 0, 1);
        }

        if (rx != NULL) {
                omap_start_dma(mcspi_dma->dma_rx_channel);
                omap2_mcspi_set_dma_req(spi, 1, 1);
        }

        if (tx != NULL) {
                wait_for_completion(&mcspi_dma->dma_tx_completion);
                dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
        }

        if (rx != NULL) {
                wait_for_completion(&mcspi_dma->dma_rx_completion);
                dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
                omap2_mcspi_set_enable(spi, 0);
                if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
                                & OMAP2_MCSPI_CHSTAT_RXS)) {
                        u32 w;

                        w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
                        if (word_len <= 8)
                                ((u8 *)xfer->rx_buf)[element_count - 1] = w;
                        else if (word_len <= 16)
                                ((u16 *)xfer->rx_buf)[element_count - 1] = w;
                        else /* word_len <= 32 */
                                ((u32 *)xfer->rx_buf)[element_count - 1] = w;
                } else {
                        dev_err(&spi->dev, "DMA RX last word empty");
                        count -= (word_len <= 8)  ? 1 :
                                 (word_len <= 16) ? 2 :
                               /* word_len <= 32 */ 4;
                }
                omap2_mcspi_set_enable(spi, 1);
        }
        return count;
}
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
        unsigned long timeout;

        timeout = jiffies + msecs_to_jiffies(1000);
        while (!(__raw_readl(reg) & bit)) {
                if (time_after(jiffies, timeout))
                        return -1;
                cpu_relax();
        }
        return 0;
}
static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
        struct omap2_mcspi	*mcspi;
        struct omap2_mcspi_cs	*cs = spi->controller_state;
        unsigned int		count, c;
        u32			l;
        void __iomem		*base = cs->base;
        void __iomem		*tx_reg;
        void __iomem		*rx_reg;
        void __iomem		*chstat_reg;
        int			word_len;

        mcspi = spi_master_get_devdata(spi->master);
        count = xfer->len;
        c = count;
        word_len = cs->word_len;

        l = mcspi_cached_chconf0(spi);
        l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;

        /* We store the pre-calculated register addresses on stack to speed
         * up the transfer loop. */
        tx_reg		= base + OMAP2_MCSPI_TX0;
        rx_reg		= base + OMAP2_MCSPI_RX0;
        chstat_reg	= base + OMAP2_MCSPI_CHSTAT0;

        if (word_len <= 8) {
                u8		*rx;
                const u8	*tx;

                rx = xfer->rx_buf;
                tx = xfer->tx_buf;

                do {
                        c -= 1;
                        if (tx != NULL) {
                                if (mcspi_wait_for_reg_bit(chstat_reg,
                                                OMAP2_MCSPI_CHSTAT_TXS) < 0) {
                                        dev_err(&spi->dev, "TXS timed out\n");
                                        goto out;
                                }
                                dev_dbg(&spi->dev, "write-%d %02x\n",
                                                word_len, *tx);
                                __raw_writel(*tx++, tx_reg);
                        }
                        if (rx != NULL) {
                                if (mcspi_wait_for_reg_bit(chstat_reg,
                                                OMAP2_MCSPI_CHSTAT_RXS) < 0) {
                                        dev_err(&spi->dev, "RXS timed out\n");
                                        goto out;
                                }
                                /* prevent last RX_ONLY read from triggering
                                 * more word i/o: switch to rx+tx
                                 */
                                if (c == 0 && tx == NULL)
                                        mcspi_write_chconf0(spi, l);
                                *rx++ = __raw_readl(rx_reg);
                                dev_dbg(&spi->dev, "read-%d %02x\n",
                                                word_len, *(rx - 1));
                        }
                } while (c);
        } else if (word_len <= 16) {
                u16		*rx;
                const u16	*tx;

                rx = xfer->rx_buf;
                tx = xfer->tx_buf;
                do {
                        c -= 2;
                        if (tx != NULL) {
                                if (mcspi_wait_for_reg_bit(chstat_reg,
                                                OMAP2_MCSPI_CHSTAT_TXS) < 0) {
                                        dev_err(&spi->dev, "TXS timed out\n");
                                        goto out;
                                }
                                dev_dbg(&spi->dev, "write-%d %04x\n",
                                                word_len, *tx);
                                __raw_writel(*tx++, tx_reg);
                        }
                        if (rx != NULL) {
                                if (mcspi_wait_for_reg_bit(chstat_reg,
                                                OMAP2_MCSPI_CHSTAT_RXS) < 0) {
                                        dev_err(&spi->dev, "RXS timed out\n");
                                        goto out;
                                }
                                /* prevent last RX_ONLY read from triggering
                                 * more word i/o: switch to rx+tx
                                 */
                                if (c == 0 && tx == NULL)
                                        mcspi_write_chconf0(spi, l);
                                *rx++ = __raw_readl(rx_reg);
                                dev_dbg(&spi->dev, "read-%d %04x\n",
                                                word_len, *(rx - 1));
                        }
                } while (c);
        } else if (word_len <= 32) {
                u32		*rx;
                const u32	*tx;

                rx = xfer->rx_buf;
                tx = xfer->tx_buf;
                do {
                        c -= 4;
                        if (tx != NULL) {
                                if (mcspi_wait_for_reg_bit(chstat_reg,
                                                OMAP2_MCSPI_CHSTAT_TXS) < 0) {
                                        dev_err(&spi->dev, "TXS timed out\n");
                                        goto out;
                                }
                                dev_dbg(&spi->dev, "write-%d %04x\n",
                                                word_len, *tx);
                                __raw_writel(*tx++, tx_reg);
                        }
                        if (rx != NULL) {
                                if (mcspi_wait_for_reg_bit(chstat_reg,
                                                OMAP2_MCSPI_CHSTAT_RXS) < 0) {
                                        dev_err(&spi->dev, "RXS timed out\n");
                                        goto out;
                                }
                                /* prevent last RX_ONLY read from triggering
                                 * more word i/o: switch to rx+tx
                                 */
                                if (c == 0 && tx == NULL)
                                        mcspi_write_chconf0(spi, l);
                                *rx++ = __raw_readl(rx_reg);
                                dev_dbg(&spi->dev, "read-%d %04x\n",
                                                word_len, *(rx - 1));
                        }
                } while (c);
        }

        /* for TX_ONLY mode, be sure all words have shifted out */
        if (xfer->rx_buf == NULL) {
                if (mcspi_wait_for_reg_bit(chstat_reg,
                                OMAP2_MCSPI_CHSTAT_TXS) < 0) {
                        dev_err(&spi->dev, "TXS timed out\n");
                } else if (mcspi_wait_for_reg_bit(chstat_reg,
                                OMAP2_MCSPI_CHSTAT_EOT) < 0)
                        dev_err(&spi->dev, "EOT timed out\n");
        }
out:
        return count - c;
}
/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
                struct spi_transfer *t)
{
        struct omap2_mcspi_cs *cs = spi->controller_state;
        struct omap2_mcspi *mcspi;
        struct spi_master *spi_cntrl;
        u32 l = 0, div = 0;
        u8 word_len = spi->bits_per_word;

        mcspi = spi_master_get_devdata(spi->master);
        spi_cntrl = mcspi->master;

        if (t != NULL && t->bits_per_word)
                word_len = t->bits_per_word;

        cs->word_len = word_len;

        if (spi->max_speed_hz) {
                while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
                                        > spi->max_speed_hz)
                        div++;
        } else
                div = 15;

        l = mcspi_cached_chconf0(spi);

        /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
         * REVISIT: this controller could support SPI_3WIRE mode.
         */
        l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
        l |= OMAP2_MCSPI_CHCONF_DPE0;

        /* wordlength */
        l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
        l |= (word_len - 1) << 7;

        /* set chipselect polarity; manage with FORCE */
        if (!(spi->mode & SPI_CS_HIGH))
                l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
        else
                l &= ~OMAP2_MCSPI_CHCONF_EPOL;

        /* set clock divisor */
        l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
        l |= div << 2;

        /* set SPI mode 0..3 */
        if (spi->mode & SPI_CPOL)
                l |= OMAP2_MCSPI_CHCONF_POL;
        else
                l &= ~OMAP2_MCSPI_CHCONF_POL;
        if (spi->mode & SPI_CPHA)
                l |= OMAP2_MCSPI_CHCONF_PHA;
        else
                l &= ~OMAP2_MCSPI_CHCONF_PHA;

        mcspi_write_chconf0(spi, l);

        dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
                        OMAP2_MCSPI_MAX_FREQ / (1 << div),
                        (spi->mode & SPI_CPHA) ? "trailing" : "leading",
                        (spi->mode & SPI_CPOL) ? "inverted" : "normal");

        return 0;
}
static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
{
        struct spi_device	*spi = data;
        struct omap2_mcspi	*mcspi;
        struct omap2_mcspi_dma	*mcspi_dma;

        mcspi = spi_master_get_devdata(spi->master);
        mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

        complete(&mcspi_dma->dma_rx_completion);

        /* We must disable the DMA RX request */
        omap2_mcspi_set_dma_req(spi, 1, 0);
}
static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
{
        struct spi_device	*spi = data;
        struct omap2_mcspi	*mcspi;
        struct omap2_mcspi_dma	*mcspi_dma;

        mcspi = spi_master_get_devdata(spi->master);
        mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

        complete(&mcspi_dma->dma_tx_completion);

        /* We must disable the DMA TX request */
        omap2_mcspi_set_dma_req(spi, 0, 0);
}
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
        struct spi_master	*master = spi->master;
        struct omap2_mcspi	*mcspi;
        struct omap2_mcspi_dma	*mcspi_dma;

        mcspi = spi_master_get_devdata(master);
        mcspi_dma = mcspi->dma_channels + spi->chip_select;

        if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
                        omap2_mcspi_dma_rx_callback, spi,
                        &mcspi_dma->dma_rx_channel)) {
                dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
                return -EAGAIN;
        }

        if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
                        omap2_mcspi_dma_tx_callback, spi,
                        &mcspi_dma->dma_tx_channel)) {
                omap_free_dma(mcspi_dma->dma_rx_channel);
                mcspi_dma->dma_rx_channel = -1;
                dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
                return -EAGAIN;
        }

        init_completion(&mcspi_dma->dma_rx_completion);
        init_completion(&mcspi_dma->dma_tx_completion);

        return 0;
}
static int omap2_mcspi_setup(struct spi_device *spi)
{
        int			ret;
        struct omap2_mcspi	*mcspi;
        struct omap2_mcspi_dma	*mcspi_dma;
        struct omap2_mcspi_cs	*cs = spi->controller_state;

        if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
                dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
                        spi->bits_per_word);
                return -EINVAL;
        }

        mcspi = spi_master_get_devdata(spi->master);
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];

        if (!cs) {
                cs = kzalloc(sizeof *cs, GFP_KERNEL);
                if (!cs)
                        return -ENOMEM;
                cs->base = mcspi->base + spi->chip_select * 0x14;
                cs->phys = mcspi->phys + spi->chip_select * 0x14;
                cs->chconf0 = 0;
                spi->controller_state = cs;
                /* Link this to context save list */
                list_add_tail(&cs->node,
                        &omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs);
        }

        if (mcspi_dma->dma_rx_channel == -1
                        || mcspi_dma->dma_tx_channel == -1) {
                ret = omap2_mcspi_request_dma(spi);
                if (ret < 0)
                        return ret;
        }

        if (omap2_mcspi_enable_clocks(mcspi))
                return -ENODEV;

        ret = omap2_mcspi_setup_transfer(spi, NULL);
        omap2_mcspi_disable_clocks(mcspi);

        return ret;
}
static void omap2_mcspi_cleanup(struct spi_device *spi)
{
        struct omap2_mcspi	*mcspi;
        struct omap2_mcspi_dma	*mcspi_dma;
        struct omap2_mcspi_cs	*cs;

        mcspi = spi_master_get_devdata(spi->master);
        mcspi_dma = &mcspi->dma_channels[spi->chip_select];

        /* Unlink controller state from context save list */
        cs = spi->controller_state;
        list_del(&cs->node);

        kfree(spi->controller_state);

        if (mcspi_dma->dma_rx_channel != -1) {
                omap_free_dma(mcspi_dma->dma_rx_channel);
                mcspi_dma->dma_rx_channel = -1;
        }
        if (mcspi_dma->dma_tx_channel != -1) {
                omap_free_dma(mcspi_dma->dma_tx_channel);
                mcspi_dma->dma_tx_channel = -1;
        }
}
static void omap2_mcspi_work(struct work_struct *work)
{
        struct omap2_mcspi	*mcspi;

        mcspi = container_of(work, struct omap2_mcspi, work);
        spin_lock_irq(&mcspi->lock);

        if (omap2_mcspi_enable_clocks(mcspi))
                goto out;

        /* We only enable one channel at a time -- the one whose message is
         * at the head of the queue -- although this controller would gladly
         * arbitrate among multiple channels. This corresponds to "single
         * channel" master mode. As a side effect, we need to manage the
         * chipselect with the FORCE bit ... CS != channel enable.
         */
        while (!list_empty(&mcspi->msg_queue)) {
                struct spi_message		*m;
                struct spi_device		*spi;
                struct spi_transfer		*t = NULL;
                int				cs_active = 0;
                struct omap2_mcspi_cs		*cs;
                int				par_override = 0;
                int				status = 0;
                u32				chconf;

                m = container_of(mcspi->msg_queue.next, struct spi_message,
                                 queue);

                list_del_init(&m->queue);
                spin_unlock_irq(&mcspi->lock);

                spi = m->spi;
                cs = spi->controller_state;

                omap2_mcspi_set_enable(spi, 1);
                list_for_each_entry(t, &m->transfers, transfer_list) {
                        if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
                                status = -EINVAL;
                                break;
                        }
                        if (par_override || t->speed_hz || t->bits_per_word) {
                                par_override = 1;
                                status = omap2_mcspi_setup_transfer(spi, t);
                                if (status < 0)
                                        break;
                                if (!t->speed_hz && !t->bits_per_word)
                                        par_override = 0;
                        }

                        if (!cs_active) {
                                omap2_mcspi_force_cs(spi, 1);
                                cs_active = 1;
                        }

                        chconf = mcspi_cached_chconf0(spi);
                        chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
                        if (t->tx_buf == NULL)
                                chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
                        else if (t->rx_buf == NULL)
                                chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
                        mcspi_write_chconf0(spi, chconf);

                        if (t->len) {
                                unsigned	count;

                                /* RX_ONLY mode needs dummy data in TX reg */
                                if (t->tx_buf == NULL)
                                        __raw_writel(0, cs->base
                                                        + OMAP2_MCSPI_TX0);

                                if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
                                        count = omap2_mcspi_txrx_dma(spi, t);
                                else
                                        count = omap2_mcspi_txrx_pio(spi, t);
                                m->actual_length += count;

                                if (count != t->len) {
                                        status = -EIO;
                                        break;
                                }
                        }

                        if (t->delay_usecs)
                                udelay(t->delay_usecs);

                        /* ignore the "leave it on after last xfer" hint */
                        if (t->cs_change) {
                                omap2_mcspi_force_cs(spi, 0);
                                cs_active = 0;
                        }
                }

                /* Restore defaults if they were overriden */
                if (par_override) {
                        par_override = 0;
                        status = omap2_mcspi_setup_transfer(spi, NULL);
                }

                if (cs_active)
                        omap2_mcspi_force_cs(spi, 0);

                omap2_mcspi_set_enable(spi, 0);

                m->status = status;
                m->complete(m->context);

                spin_lock_irq(&mcspi->lock);
        }

        omap2_mcspi_disable_clocks(mcspi);

out:
        spin_unlock_irq(&mcspi->lock);
}
static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
{
        struct omap2_mcspi	*mcspi;
        unsigned long		flags;
        struct spi_transfer	*t;

        m->actual_length = 0;
        m->status = 0;

        /* reject invalid messages and transfers */
        if (list_empty(&m->transfers) || !m->complete)
                return -EINVAL;
        list_for_each_entry(t, &m->transfers, transfer_list) {
                const void	*tx_buf = t->tx_buf;
                void		*rx_buf = t->rx_buf;
                unsigned	len = t->len;

                if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
                                || (len && !(rx_buf || tx_buf))
                                || (t->bits_per_word &&
                                        (  t->bits_per_word < 4
                                        || t->bits_per_word > 32))) {
                        dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
                                        t->speed_hz,
                                        len,
                                        tx_buf ? "tx" : "",
                                        rx_buf ? "rx" : "",
                                        t->bits_per_word);
                        return -EINVAL;
                }
                if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) {
                        dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
                                        t->speed_hz,
                                        OMAP2_MCSPI_MAX_FREQ/(1<<16));
                        return -EINVAL;
                }

                if (m->is_dma_mapped || len < DMA_MIN_BYTES)
                        continue;

                /* Do DMA mapping "early" for better error reporting and
                 * dcache use. Note that if dma_unmap_single() ever starts
                 * to do real work on ARM, we'd need to clean up mappings
                 * for previous transfers on *ALL* exits of this loop...
                 */
                if (tx_buf != NULL) {
                        t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
                                        len, DMA_TO_DEVICE);
                        if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                                dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                                                'T', len);
                                return -EINVAL;
                        }
                }
                if (rx_buf != NULL) {
                        t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
                                        DMA_FROM_DEVICE);
                        if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                                dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                                                'R', len);
                                if (tx_buf != NULL)
                                        dma_unmap_single(NULL, t->tx_dma,
                                                        len, DMA_TO_DEVICE);
                                return -EINVAL;
                        }
                }
        }

        mcspi = spi_master_get_devdata(spi->master);

        spin_lock_irqsave(&mcspi->lock, flags);
        list_add_tail(&m->queue, &mcspi->msg_queue);
        queue_work(omap2_mcspi_wq, &mcspi->work);
        spin_unlock_irqrestore(&mcspi->lock, flags);

        return 0;
}
static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
{
        struct spi_master	*master = mcspi->master;
        u32			tmp;

        if (omap2_mcspi_enable_clocks(mcspi))
                return -1;

        mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
                        OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
        do {
                tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
        } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));

        tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE |
                OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP |
                OMAP2_MCSPI_SYSCONFIG_SMARTIDLE;
        mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp);
        omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp;

        tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
        mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
        omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp;

        omap2_mcspi_set_master_mode(master);
        omap2_mcspi_disable_clocks(mcspi);
        return 0;
}
static u8 __initdata spi1_rxdma_id[] = {
        OMAP24XX_DMA_SPI1_RX0,
        OMAP24XX_DMA_SPI1_RX1,
        OMAP24XX_DMA_SPI1_RX2,
        OMAP24XX_DMA_SPI1_RX3,
};

static u8 __initdata spi1_txdma_id[] = {
        OMAP24XX_DMA_SPI1_TX0,
        OMAP24XX_DMA_SPI1_TX1,
        OMAP24XX_DMA_SPI1_TX2,
        OMAP24XX_DMA_SPI1_TX3,
};

static u8 __initdata spi2_rxdma_id[] = {
        OMAP24XX_DMA_SPI2_RX0,
        OMAP24XX_DMA_SPI2_RX1,
};

static u8 __initdata spi2_txdma_id[] = {
        OMAP24XX_DMA_SPI2_TX0,
        OMAP24XX_DMA_SPI2_TX1,
};

#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) \
        || defined(CONFIG_ARCH_OMAP4)
static u8 __initdata spi3_rxdma_id[] = {
        OMAP24XX_DMA_SPI3_RX0,
        OMAP24XX_DMA_SPI3_RX1,
};

static u8 __initdata spi3_txdma_id[] = {
        OMAP24XX_DMA_SPI3_TX0,
        OMAP24XX_DMA_SPI3_TX1,
};
#endif

#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static u8 __initdata spi4_rxdma_id[] = {
        OMAP34XX_DMA_SPI4_RX0,
};

static u8 __initdata spi4_txdma_id[] = {
        OMAP34XX_DMA_SPI4_TX0,
};
#endif
static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
        struct spi_master	*master;
        struct omap2_mcspi	*mcspi;
        struct resource		*r;
        int			status = 0, i;
        const u8		*rxdma_id, *txdma_id;
        unsigned		num_chipselect;

        switch (pdev->id) {
        case 1:
                rxdma_id = spi1_rxdma_id;
                txdma_id = spi1_txdma_id;
                num_chipselect = 4;
                break;
        case 2:
                rxdma_id = spi2_rxdma_id;
                txdma_id = spi2_txdma_id;
                num_chipselect = 2;
                break;
#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
        || defined(CONFIG_ARCH_OMAP4)
        case 3:
                rxdma_id = spi3_rxdma_id;
                txdma_id = spi3_txdma_id;
                num_chipselect = 2;
                break;
#endif
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
        case 4:
                rxdma_id = spi4_rxdma_id;
                txdma_id = spi4_txdma_id;
                num_chipselect = 1;
                break;
#endif
        default:
                return -EINVAL;
        }

        master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
        if (master == NULL) {
                dev_dbg(&pdev->dev, "master allocation failed\n");
                return -ENOMEM;
        }

        /* the spi->mode bits understood by this driver: */
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

        if (pdev->id != -1)
                master->bus_num = pdev->id;

        master->setup = omap2_mcspi_setup;
        master->transfer = omap2_mcspi_transfer;
        master->cleanup = omap2_mcspi_cleanup;
        master->num_chipselect = num_chipselect;

        dev_set_drvdata(&pdev->dev, master);

        mcspi = spi_master_get_devdata(master);
        mcspi->master = master;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (r == NULL) {
                status = -ENODEV;
                goto err1;
        }
        if (!request_mem_region(r->start, (r->end - r->start) + 1,
                        dev_name(&pdev->dev))) {
                status = -EBUSY;
                goto err1;
        }

        mcspi->phys = r->start;
        mcspi->base = ioremap(r->start, r->end - r->start + 1);
        if (!mcspi->base) {
                dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
                status = -ENOMEM;
                goto err1aa;
        }

        INIT_WORK(&mcspi->work, omap2_mcspi_work);

        spin_lock_init(&mcspi->lock);
        INIT_LIST_HEAD(&mcspi->msg_queue);
        INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);

        mcspi->ick = clk_get(&pdev->dev, "ick");
        if (IS_ERR(mcspi->ick)) {
                dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
                status = PTR_ERR(mcspi->ick);
                goto err1a;
        }
        mcspi->fck = clk_get(&pdev->dev, "fck");
        if (IS_ERR(mcspi->fck)) {
                dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
                status = PTR_ERR(mcspi->fck);
                goto err2;
        }

        mcspi->dma_channels = kcalloc(master->num_chipselect,
                        sizeof(struct omap2_mcspi_dma),
                        GFP_KERNEL);

        if (mcspi->dma_channels == NULL)
                goto err3;

        for (i = 0; i < num_chipselect; i++) {
                mcspi->dma_channels[i].dma_rx_channel = -1;
                mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
                mcspi->dma_channels[i].dma_tx_channel = -1;
                mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
        }

        if (omap2_mcspi_reset(mcspi) < 0)
                goto err4;

        status = spi_register_master(master);
        if (status < 0)
                goto err4;

        return status;

err4:
        kfree(mcspi->dma_channels);
err3:
        clk_put(mcspi->fck);
err2:
        clk_put(mcspi->ick);
err1a:
        iounmap(mcspi->base);
err1aa:
        release_mem_region(r->start, (r->end - r->start) + 1);
err1:
        spi_master_put(master);
        return status;
}
static int __exit omap2_mcspi_remove(struct platform_device *pdev)
{
        struct spi_master	*master;
        struct omap2_mcspi	*mcspi;
        struct omap2_mcspi_dma	*dma_channels;
        struct resource		*r;
        void __iomem		*base;

        master = dev_get_drvdata(&pdev->dev);
        mcspi = spi_master_get_devdata(master);
        dma_channels = mcspi->dma_channels;

        clk_put(mcspi->fck);
        clk_put(mcspi->ick);

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(r->start, (r->end - r->start) + 1);

        base = mcspi->base;
        spi_unregister_master(master);
        iounmap(base);
        kfree(dma_channels);

        return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");

static struct platform_driver omap2_mcspi_driver = {
        .driver = {
                .name =		"omap2_mcspi",
                .owner =	THIS_MODULE,
        },
        .remove =	__exit_p(omap2_mcspi_remove),
};
static int __init omap2_mcspi_init(void)
{
        omap2_mcspi_wq = create_singlethread_workqueue(
                                omap2_mcspi_driver.driver.name);
        if (omap2_mcspi_wq == NULL)
                return -ENOMEM;
        return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
}
subsys_initcall(omap2_mcspi_init);
static void __exit omap2_mcspi_exit(void)
{
        platform_driver_unregister(&omap2_mcspi_driver);

        destroy_workqueue(omap2_mcspi_wq);
}
module_exit(omap2_mcspi_exit);

MODULE_LICENSE("GPL");