/*
 * File:         drivers/spi/bfin5xx_spi.c
 * Author:       Luke Yang (Analog Devices Inc.)
 * Created:      March 10th, 2006
 * Description:  SPI controller driver for Blackfin 5xx
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * March 10, 2006   bfin5xx_spi.c created. (Luke Yang)
 * August 7, 2006   added full duplex mode (Axel Weiss & Luke Yang)
 *
 * Copyright 2004-2006 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.
 * If not, write to the Free Software Foundation,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/delay.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
MODULE_AUTHOR("Luke Yang");
MODULE_DESCRIPTION("Blackfin 5xx SPI Controller");
MODULE_LICENSE("GPL");
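/*
 * Editorial note: IS_DMA_ALIGNED() below simply tests that the low three
 * address bits are clear, i.e. that a buffer sits on an 8-byte boundary
 * before it is handed to the DMA engine.
 */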
#define IS_DMA_ALIGNED(x)	(((u32)(x) & 0x07) == 0)
#define DEFINE_SPI_REG(reg, off) \
static inline u16 read_##reg(void) \
	{ return *(volatile unsigned short *)(SPI0_REGBASE + off); } \
static inline void write_##reg(u16 v) \
	{ *(volatile unsigned short *)(SPI0_REGBASE + off) = v; }
DEFINE_SPI_REG(CTRL, 0x00)
DEFINE_SPI_REG(FLAG, 0x04)
DEFINE_SPI_REG(STAT, 0x08)
DEFINE_SPI_REG(TDBR, 0x0C)
DEFINE_SPI_REG(RDBR, 0x10)
DEFINE_SPI_REG(BAUD, 0x14)
DEFINE_SPI_REG(SHAW, 0x18)
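/*
 * Illustration (editorial, not part of the original source): each
 * DEFINE_SPI_REG() line above expands into a read_<reg>()/write_<reg>()
 * accessor pair for the memory-mapped register at SPI0_REGBASE + offset,
 * so code elsewhere in this driver can do, for example:
 *
 *	u16 cr = read_CTRL();
 *	write_CTRL(cr | BIT_CTL_ENABLE);
 */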
#define START_STATE	((void *)0)
#define RUNNING_STATE	((void *)1)
#define DONE_STATE	((void *)2)
#define ERROR_STATE	((void *)-1)
#define QUEUE_RUNNING	0
#define QUEUE_STOPPED	1
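/*
 * Editorial summary, derived from pump_messages()/pump_transfers() below:
 * each spi_message starts in START_STATE, moves to RUNNING_STATE while its
 * transfers are pumped, and ends in DONE_STATE (or ERROR_STATE if a
 * transfer fails); QUEUE_RUNNING/QUEUE_STOPPED describe whether the message
 * queue is being serviced. The fields that follow are members of
 * struct driver_data, the per-controller state used throughout this file.
 */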
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	struct bfin5xx_spi_master *master_info;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct pump_messages;
	struct list_head queue;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message transfer state info */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;

	void (*write) (struct driver_data *);
	void (*read) (struct driver_data *);
	void (*duplex) (struct driver_data *);
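	/*
	 * Editorial note: the fields below belong to struct chip_data, the
	 * per-device configuration that setup() builds and stores with
	 * spi_set_ctldata() further down.
	 */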
	u32 width;		/* 0 or 1 */
	u8 bits_per_word;	/* 8 or 16 */
	u8 cs_change_per_word;

	void (*write) (struct driver_data *);
	void (*read) (struct driver_data *);
	void (*duplex) (struct driver_data *);
void bfin_spi_enable(struct driver_data *drv_data)

	write_CTRL(cr | BIT_CTL_ENABLE);

void bfin_spi_disable(struct driver_data *drv_data)

	write_CTRL(cr & (~BIT_CTL_ENABLE));
/* Calculate the SPI_BAUD register value based on input HZ */
static u16 hz_to_spi_baud(u32 speed_hz)

	u_long sclk = get_sclk();
	u16 spi_baud = (sclk / (2 * speed_hz));

	if ((sclk % (2 * speed_hz)) > 0)
		spi_baud++;

	pr_debug("sclk = %ld, speed_hz = %d, spi_baud = %d\n", sclk, speed_hz,
		 spi_baud);
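	/*
	 * Illustration (editorial, example values only): with
	 * sclk = 100,000,000 Hz and speed_hz = 10,000,000 Hz the divider is
	 * 100000000 / (2 * 10000000) = 5 exactly; with speed_hz = 8,000,000
	 * the division gives 6 with a remainder, so spi_baud is bumped to 7
	 * and the resulting rate stays at or below the requested speed.
	 */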
static int flush(struct driver_data *drv_data)

	unsigned long limit = loops_per_jiffy << 1;

	/* wait for stop and clear stat */
	while (!(read_STAT() & BIT_STAT_SPIF) && limit--)
		continue;

	write_STAT(BIT_STAT_CLR);
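	/*
	 * Editorial note: pump_transfers() below treats a zero return from
	 * flush() as a failure, so the limit above is effectively a timeout
	 * on waiting for SPIF.
	 */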
/* stop the controller and re-configure the current chip */
static void restore_state(struct driver_data *drv_data)

	struct chip_data *chip = drv_data->cur_chip;

	/* Clear status and disable clock */
	write_STAT(BIT_STAT_CLR);
	bfin_spi_disable(drv_data);
	pr_debug("restoring spi ctl state\n");

#if defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
	pr_debug("chip select number is %d\n", chip->chip_select_num);

	switch (chip->chip_select_num) {
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3c00);

		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJSE_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);

		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS4E_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3840);

		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS5E_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3820);

		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PFS6E_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3810);

		bfin_write_PORT_MUX(bfin_read_PORT_MUX() | PJCE_SPI);
		bfin_write_PORTF_FER(bfin_read_PORTF_FER() | 0x3800);
#endif

	/* Load the registers */
	write_CTRL(chip->ctl_reg);
	write_BAUD(chip->baud);
	write_FLAG(chip->flag);
/* used to kick off a transfer in rx mode */
static unsigned short dummy_read(void)

static void null_writer(struct driver_data *drv_data)

	u8 n_bytes = drv_data->n_bytes;

	while (drv_data->tx < drv_data->tx_end) {
		while ((read_STAT() & BIT_STAT_TXS))
			continue;
		drv_data->tx += n_bytes;
static void null_reader(struct driver_data *drv_data)

	u8 n_bytes = drv_data->n_bytes;

	while (drv_data->rx < drv_data->rx_end) {
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		drv_data->rx += n_bytes;
static void u8_writer(struct driver_data *drv_data)

	pr_debug("cr8-s is 0x%x\n", read_STAT());
	while (drv_data->tx < drv_data->tx_end) {
		write_TDBR(*(u8 *) (drv_data->tx));
		while (read_STAT() & BIT_STAT_TXS)
			continue;

	/* poll for SPI completion before returning */
	while (!(read_STAT() & BIT_STAT_SPIF))
		continue;
static void u8_cs_chg_writer(struct driver_data *drv_data)

	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		write_FLAG(chip->flag);

		write_TDBR(*(u8 *) (drv_data->tx));
		while (read_STAT() & BIT_STAT_TXS)
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
static void u8_reader(struct driver_data *drv_data)

	pr_debug("cr-8 is 0x%x\n", read_STAT());

	/* clear TDBR buffer before read (else it will be shifted out) */

	while (drv_data->rx < drv_data->rx_end - 1) {
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();

	while (!(read_STAT() & BIT_STAT_RXS))
		continue;
	*(u8 *) (drv_data->rx) = read_SHAW();
static void u8_cs_chg_reader(struct driver_data *drv_data)

	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		write_FLAG(chip->flag);

		read_RDBR();	/* kick off */
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		*(u8 *) (drv_data->rx) = read_SHAW();
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
static void u8_duplex(struct driver_data *drv_data)

	/* in duplex mode, clk is triggered by writing of TDBR */
	while (drv_data->rx < drv_data->rx_end) {
		write_TDBR(*(u8 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();
static void u8_cs_chg_duplex(struct driver_data *drv_data)

	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		write_FLAG(chip->flag);

		write_TDBR(*(u8 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u8 *) (drv_data->rx) = read_RDBR();
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
static void u16_writer(struct driver_data *drv_data)

	pr_debug("cr16 is 0x%x\n", read_STAT());
	while (drv_data->tx < drv_data->tx_end) {
		write_TDBR(*(u16 *) (drv_data->tx));
		while ((read_STAT() & BIT_STAT_TXS))
			continue;

	/* poll for SPI completion before returning */
	while (!(read_STAT() & BIT_STAT_SPIF))
		continue;
static void u16_cs_chg_writer(struct driver_data *drv_data)

	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		write_FLAG(chip->flag);

		write_TDBR(*(u16 *) (drv_data->tx));
		while ((read_STAT() & BIT_STAT_TXS))
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
static void u16_reader(struct driver_data *drv_data)

	pr_debug("cr-16 is 0x%x\n", read_STAT());

	while (drv_data->rx < (drv_data->rx_end - 2)) {
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();

	while (!(read_STAT() & BIT_STAT_RXS))
		continue;
	*(u16 *) (drv_data->rx) = read_SHAW();
static void u16_cs_chg_reader(struct driver_data *drv_data)

	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->rx < drv_data->rx_end) {
		write_FLAG(chip->flag);

		read_RDBR();	/* kick off */
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		*(u16 *) (drv_data->rx) = read_SHAW();
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
static void u16_duplex(struct driver_data *drv_data)

	/* in duplex mode, clk is triggered by writing of TDBR */
	while (drv_data->tx < drv_data->tx_end) {
		write_TDBR(*(u16 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();
static void u16_cs_chg_duplex(struct driver_data *drv_data)

	struct chip_data *chip = drv_data->cur_chip;

	while (drv_data->tx < drv_data->tx_end) {
		write_FLAG(chip->flag);

		write_TDBR(*(u16 *) (drv_data->tx));
		while (!(read_STAT() & BIT_STAT_SPIF))
			continue;
		while (!(read_STAT() & BIT_STAT_RXS))
			continue;
		*(u16 *) (drv_data->rx) = read_RDBR();
		write_FLAG(0xFF00 | chip->flag);

		if (chip->cs_chg_udelay)
			udelay(chip->cs_chg_udelay);
/* test if there is another transfer to be done */
static void *next_transfer(struct driver_data *drv_data)

	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
		    list_entry(trans->transfer_list.next,
			       struct spi_transfer, transfer_list);
		return RUNNING_STATE;
/*
 * caller already set message->status;
 * dma and pio irqs are blocked; give the finished message back
 */
static void giveback(struct driver_data *drv_data)

	struct spi_transfer *last_transfer;
	struct spi_message *msg;

	spin_lock_irqsave(&drv_data->lock, flags);
	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->pump_messages);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
				   struct spi_transfer, transfer_list);

	/* disable the chip select signal, but do not stop spi in autobuffer mode */
	if (drv_data->tx_dma != 0xFFFF) {
		bfin_spi_disable(drv_data);

	msg->complete(msg->context);
static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)

	struct driver_data *drv_data = (struct driver_data *)dev_id;
	struct spi_message *msg = drv_data->cur_msg;

	pr_debug("in dma_irq_handler\n");
	clear_dma_irqstat(CH_SPI);

	/*
	 * wait for the last transaction to be shifted out. yes, these two
	 * while loops are supposed to be the same (see the HRM).
	 */
	if (drv_data->tx != NULL) {
		while (bfin_read_SPI_STAT() & TXS)
			continue;
		while (bfin_read_SPI_STAT() & TXS)
			continue;

	while (!(bfin_read_SPI_STAT() & SPIF))
		continue;

	bfin_spi_disable(drv_data);

	msg->actual_length += drv_data->len_in_bytes;

	/* Move to next transfer */
	msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);

	/* free the irq handler before next transfer */
	pr_debug("disable dma channel irq%d\n", CH_SPI);
	dma_disable_irq(CH_SPI);
static void pump_transfers(unsigned long data)

	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	u16 cr, width, dma_width, dma_config;
	u32 tranf_success = 1;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/*
	 * if msg is error or done, report it back using the complete() callback
	 */

	/* Handle an abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;

	/* Handle end of message */
	if (message->state == DONE_STATE) {

	/* Delay if requested at end of transfer */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
				      struct spi_transfer, transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

	/* Setup the transfer state based on the type of transfer */
	if (flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;

	if (transfer->tx_buf != NULL) {
		drv_data->tx = (void *)transfer->tx_buf;
		drv_data->tx_end = drv_data->tx + transfer->len;
		pr_debug("tx_buf is %p, tx_end is %p\n", transfer->tx_buf,
			 drv_data->tx_end);

	if (transfer->rx_buf != NULL) {
		drv_data->rx = transfer->rx_buf;
		drv_data->rx_end = drv_data->rx + transfer->len;
		pr_debug("rx_buf is %p, rx_end is %p\n", transfer->rx_buf,
			 drv_data->rx_end);

	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len_in_bytes = transfer->len;

	if (width == CFG_SPI_WORDSIZE16)
		drv_data->len = (transfer->len) >> 1;
	else
		drv_data->len = transfer->len;

	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;
	drv_data->duplex = chip->duplex ? chip->duplex : null_writer;
	pr_debug
	    ("transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n",
	     drv_data->write, chip->write, null_writer);

	/* speed and width have been set on a per-message basis */
	message->state = RUNNING_STATE;

	/* restore spi status for each spi transfer */
	if (transfer->speed_hz)
		write_BAUD(hz_to_spi_baud(transfer->speed_hz));
	else
		write_BAUD(chip->baud);

	write_FLAG(chip->flag);

	pr_debug("now pumping a transfer: width is %d, len is %d\n", width,
		 drv_data->len);

	/*
	 * Try to map the dma buffer and do a dma transfer if successful;
	 * use a different way to r/w according to
	 * drv_data->cur_chip->enable_dma
	 */
	if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {

		write_STAT(BIT_STAT_CLR);

		clear_dma_irqstat(CH_SPI);
		bfin_spi_disable(drv_data);

		/* config dma channel */
		pr_debug("doing dma transfer\n");
		if (width == CFG_SPI_WORDSIZE16) {
			set_dma_x_count(CH_SPI, drv_data->len);
			set_dma_x_modify(CH_SPI, 2);
			dma_width = WDSIZE_16;
		} else {
			set_dma_x_count(CH_SPI, drv_data->len);
			set_dma_x_modify(CH_SPI, 1);
			dma_width = WDSIZE_8;
		}

		/* set transfer width and direction, and enable spi */
		cr = (read_CTRL() & (~BIT_CTL_TIMOD));

		/* dirty hack for autobuffer DMA mode */
		if (drv_data->tx_dma == 0xFFFF) {
			pr_debug("doing autobuffer DMA out.\n");

			/* no irq in autobuffer mode */
			dma_config =
			    (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);

			write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
				   (CFG_SPI_ENABLE << 14));

			/* just return here; there can only be one transfer in this mode */
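			/*
			 * Editorial note: a tx_dma value of 0xFFFF appears to
			 * act as a sentinel for autobuffer mode; giveback()
			 * makes the matching check and leaves the controller
			 * running in that case.
			 */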
		/* In dma mode, rx or tx must be NULL in one transfer */
		if (drv_data->rx != NULL) {
			/* set transfer mode, and enable SPI */
			pr_debug("doing DMA in.\n");

			/* disable SPI before write to TDBR */
			write_CTRL(cr & ~BIT_CTL_ENABLE);

			/* clear tx reg so former data is not shifted out */

			set_dma_x_count(CH_SPI, drv_data->len);

			dma_enable_irq(CH_SPI);
			dma_config = (WNR | RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->rx);

			cr |=
			    CFG_SPI_DMAREAD | (width << 8) | (CFG_SPI_ENABLE <<
							      14);
			/* set transfer mode, and enable SPI */
			write_CTRL(cr);
		} else if (drv_data->tx != NULL) {
			pr_debug("doing DMA out.\n");

			dma_enable_irq(CH_SPI);
			dma_config = (RESTART | dma_width | DI_EN);
			set_dma_config(CH_SPI, dma_config);
			set_dma_start_addr(CH_SPI, (unsigned long)drv_data->tx);

			write_CTRL(cr | CFG_SPI_DMAWRITE | (width << 8) |
				   (CFG_SPI_ENABLE << 14));
	} else {
		/* IO mode write then read */
		pr_debug("doing IO transfer\n");

		write_STAT(BIT_STAT_CLR);

		if (drv_data->tx != NULL && drv_data->rx != NULL) {
			/* full duplex mode */
			BUG_ON((drv_data->tx_end - drv_data->tx) !=
			       (drv_data->rx_end - drv_data->rx));
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));	/* clear the TIMOD bits */
			cr |=
			    CFG_SPI_WRITE | (width << 8) | (CFG_SPI_ENABLE <<
							    14);
			pr_debug("IO duplex: cr is 0x%x\n", cr);
			write_CTRL(cr);

			drv_data->duplex(drv_data);

			if (drv_data->tx != drv_data->tx_end)
				tranf_success = 0;
		} else if (drv_data->tx != NULL) {
			/* write only half duplex */
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));	/* clear the TIMOD bits */
			cr |=
			    CFG_SPI_WRITE | (width << 8) | (CFG_SPI_ENABLE <<
							    14);
			pr_debug("IO write: cr is 0x%x\n", cr);
			write_CTRL(cr);

			drv_data->write(drv_data);

			if (drv_data->tx != drv_data->tx_end)
				tranf_success = 0;
		} else if (drv_data->rx != NULL) {
			/* read only half duplex */
			cr = (read_CTRL() & (~BIT_CTL_TIMOD));	/* clear the TIMOD bits */
			cr |=
			    CFG_SPI_READ | (width << 8) | (CFG_SPI_ENABLE <<
							   14);
			pr_debug("IO read: cr is 0x%x\n", cr);
			write_CTRL(cr);

			drv_data->read(drv_data);
			if (drv_data->rx != drv_data->rx_end)
				tranf_success = 0;

		if (!tranf_success) {
			pr_debug("IO write error!\n");
			message->state = ERROR_STATE;
		} else {
			/* Update total bytes transferred */
			message->actual_length += drv_data->len;

			/* Move to the next transfer of this msg */
			message->state = next_transfer(drv_data);
		}

		/* Schedule the next transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);
/* pop a msg from the queue and kick off the real transfer */
static void pump_messages(struct work_struct *work)

	struct driver_data *drv_data =
	    container_of(work, struct driver_data, pump_messages);

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		/* pumper kicked off but no work to do */
		spin_unlock_irqrestore(&drv_data->lock, flags);

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
				       struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
					    struct spi_transfer, transfer_list);

	/* Setup the SSP using the per-chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
	restore_state(drv_data);
	pr_debug
	    ("got a message to pump, state is set to: baud %d, flag 0x%x, ctl 0x%x\n",
	     drv_data->cur_chip->baud, drv_data->cur_chip->flag,
	     drv_data->cur_chip->ctl_reg);
	pr_debug("the first transfer len is %d\n", drv_data->cur_transfer->len);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);

	spin_unlock_irqrestore(&drv_data->lock, flags);
/*
 * got a msg to transfer, queue it in drv_data->queue
 * and kick off the message pumper
 */
static int transfer(struct spi_device *spi, struct spi_message *msg)

	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	pr_debug("adding a msg in transfer()\n");
	list_add_tail(&msg->queue, &drv_data->queue);

	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->pump_messages);

	spin_unlock_irqrestore(&drv_data->lock, flags);
/* first setup for new devices */
static int setup(struct spi_device *spi)

	struct bfin5xx_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	/* Abort device setup if requested features are not supported */
	if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
		dev_err(&spi->dev, "requested mode not fully supported\n");

	/* Zero (the default) here means 8 bits */
	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
		return -EINVAL;

	/* Only alloc (or use chip_info) on first setup */
	chip = spi_get_ctldata(spi);
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);

		chip->enable_dma = 0;
		chip_info = spi->controller_data;

	/* chip_info isn't always needed */
		chip->enable_dma = chip_info->enable_dma != 0
		    && drv_data->master_info->enable_dma;
		chip->ctl_reg = chip_info->ctl_reg;
		chip->bits_per_word = chip_info->bits_per_word;
		chip->cs_change_per_word = chip_info->cs_change_per_word;
		chip->cs_chg_udelay = chip_info->cs_chg_udelay;

	/* translate the common spi framework settings into our register */
	if (spi->mode & SPI_CPOL)
		chip->ctl_reg |= CPOL;
	if (spi->mode & SPI_CPHA)
		chip->ctl_reg |= CPHA;
	if (spi->mode & SPI_LSB_FIRST)
		chip->ctl_reg |= LSBF;
	/* we don't support running in slave mode (yet?) */
	chip->ctl_reg |= MSTR;

	/*
	 * if any one SPI chip is registered and wants DMA, request the
	 * DMA channel for it
	 */
	if (chip->enable_dma && !dma_requested) {
		/* register dma irq handler */
		if (request_dma(CH_SPI, "BF53x_SPI_DMA") < 0) {
			pr_debug
			    ("Unable to request BlackFin SPI DMA channel\n");

		if (set_dma_callback(CH_SPI, (void *)dma_irq_handler, drv_data)
		    < 0) {
			pr_debug("Unable to set dma callback\n");

		dma_disable_irq(CH_SPI);

	/*
	 * Notice: for blackfin, the speed_hz is the value of register
	 * SPI_BAUD, not the real baudrate
	 */
	chip->baud = hz_to_spi_baud(spi->max_speed_hz);
	spi_flg = ~(1 << (spi->chip_select));
	chip->flag = ((u16) spi_flg << 8) | (1 << (spi->chip_select));
	chip->chip_select_num = spi->chip_select;
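	/*
	 * Illustration (editorial; assumes spi_flg is an 8-bit variable):
	 * for chip_select 2, spi_flg becomes ~0x04 = 0xFB and chip->flag
	 * becomes 0xFB04 -- the low byte selects the slave-select line and
	 * the high byte carries the level driven on the FLG bits, which is
	 * what the *_cs_chg_* helpers above combine with 0xFF00 between
	 * words.
	 */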
	switch (chip->bits_per_word) {
	case 8:
		chip->width = CFG_SPI_WORDSIZE8;
		chip->read = chip->cs_change_per_word ?
		    u8_cs_chg_reader : u8_reader;
		chip->write = chip->cs_change_per_word ?
		    u8_cs_chg_writer : u8_writer;
		chip->duplex = chip->cs_change_per_word ?
		    u8_cs_chg_duplex : u8_duplex;
		break;

	case 16:
		chip->width = CFG_SPI_WORDSIZE16;
		chip->read = chip->cs_change_per_word ?
		    u16_cs_chg_reader : u16_reader;
		chip->write = chip->cs_change_per_word ?
		    u16_cs_chg_writer : u16_writer;
		chip->duplex = chip->cs_change_per_word ?
		    u16_cs_chg_duplex : u16_duplex;
		break;

	default:
		dev_err(&spi->dev, "%d bits_per_word is not supported\n",
			chip->bits_per_word);
	}

	pr_debug("setup spi chip %s, width is %d, dma is %d,",
		 spi->modalias, chip->width, chip->enable_dma);
	pr_debug("ctl_reg is 0x%x, flag_reg is 0x%x\n",
		 chip->ctl_reg, chip->flag);

	spi_set_ctldata(spi, chip);
/*
 * callback for the spi framework.
 * clean up driver-specific data
 */
static void cleanup(const struct spi_device *spi)

	struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
static inline int init_queue(struct driver_data *drv_data)

	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = QUEUE_STOPPED;

	/* init transfer tasklet */
	tasklet_init(&drv_data->pump_transfers,
		     pump_transfers, (unsigned long)drv_data);

	/* init messages workqueue */
	INIT_WORK(&drv_data->pump_messages, pump_messages);
	drv_data->workqueue =
	    create_singlethread_workqueue(drv_data->master->cdev.dev->bus_id);
	if (drv_data->workqueue == NULL)
static inline int start_queue(struct driver_data *drv_data)

	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);

	drv_data->run = QUEUE_RUNNING;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->pump_messages);
static inline int stop_queue(struct driver_data *drv_data)

	unsigned long flags;
	unsigned limit = 500;

	spin_lock_irqsave(&drv_data->lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on drv_data->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	drv_data->run = QUEUE_STOPPED;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);

		spin_lock_irqsave(&drv_data->lock, flags);

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);
static inline int destroy_queue(struct driver_data *drv_data)

	status = stop_queue(drv_data);

	destroy_workqueue(drv_data->workqueue);
static int __init bfin5xx_spi_probe(struct platform_device *pdev)

	struct device *dev = &pdev->dev;
	struct bfin5xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data = NULL;

	platform_info = dev->platform_data;

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
		dev_err(&pdev->dev, "cannot alloc spi_master\n");

	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer = transfer;

	/* Initialize and start the queue */
	status = init_queue(drv_data);
		dev_err(&pdev->dev, "problem initializing queue\n");
		goto out_error_queue_alloc;

	status = start_queue(drv_data);
		dev_err(&pdev->dev, "problem starting queue\n");
		goto out_error_queue_alloc;

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_queue_alloc;

	pr_debug("controller probe successful\n");

out_error_queue_alloc:
	destroy_queue(drv_data);
	spi_master_put(master);
/* stop hardware and remove the driver */
static int __devexit bfin5xx_spi_remove(struct platform_device *pdev)

	struct driver_data *drv_data = platform_get_drvdata(pdev);

	/* Remove the queue */
	status = destroy_queue(drv_data);

	/* Disable the SSP at the peripheral and SOC level */
	bfin_spi_disable(drv_data);

	if (drv_data->master_info->enable_dma) {
		if (dma_channel_active(CH_SPI))

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);
#ifdef CONFIG_PM
static int bfin5xx_spi_suspend(struct platform_device *pdev, pm_message_t state)

	struct driver_data *drv_data = platform_get_drvdata(pdev);

	status = stop_queue(drv_data);

	bfin_spi_disable(drv_data);
static int bfin5xx_spi_resume(struct platform_device *pdev)

	struct driver_data *drv_data = platform_get_drvdata(pdev);

	/* Enable the SPI interface */
	bfin_spi_enable(drv_data);

	/* Start the queue running */
	status = start_queue(drv_data);
		dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
#else
#define bfin5xx_spi_suspend	NULL
#define bfin5xx_spi_resume	NULL
#endif				/* CONFIG_PM */
static struct platform_driver bfin5xx_spi_driver = {
	.driver = {
		   .name = "bfin-spi-master",
		   .bus = &platform_bus_type,
		   .owner = THIS_MODULE,
		   },
	.probe = bfin5xx_spi_probe,
	.remove = __devexit_p(bfin5xx_spi_remove),
	.suspend = bfin5xx_spi_suspend,
	.resume = bfin5xx_spi_resume,
};
static int __init bfin5xx_spi_init(void)

	return platform_driver_register(&bfin5xx_spi_driver);

module_init(bfin5xx_spi_init);

static void __exit bfin5xx_spi_exit(void)

	platform_driver_unregister(&bfin5xx_spi_driver);

module_exit(bfin5xx_spi_exit);