/*
 * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/spi/dw_spi.h>
#include <linux/spi/spi.h>

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#define START_STATE	((void *)0)
#define RUNNING_STATE	((void *)1)
#define DONE_STATE	((void *)2)
#define ERROR_STATE	((void *)-1)

#define QUEUE_RUNNING	0
#define QUEUE_STOPPED	1

#define MRST_SPI_DEASSERT	0
#define MRST_SPI_ASSERT		1
/* Slave spi_dev related */
struct chip_data {
	u16 cr0;
	u8 cs;			/* chip select pin */
	u8 n_bytes;		/* current is a 1/2/4 byte op */
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u32 dma_width;
	u32 rx_threshold;
	u32 tx_threshold;
	u8 enable_dma;
	u8 bits_per_word;
	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	int (*write)(struct dw_spi *dws);
	int (*read)(struct dw_spi *dws);
	void (*cs_control)(u32 command);
};
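
/*
 * Usage sketch (illustrative, not part of this driver): per-device settings
 * normally reach dw_spi_setup() from a board file through
 * spi_board_info.controller_data, carrying a struct dw_spi_chip as declared
 * in <linux/spi/dw_spi.h>.  The modalias, speed and chip-select values below
 * are hypothetical.
 *
 *	static struct dw_spi_chip board_spi_chip = {
 *		.poll_mode	= 1,		// PIO polling, no IRQ
 *		.type		= SSI_MOTO_SPI,	// Motorola SPI frame format
 *		.enable_dma	= 0,
 *	};
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	 = "spidev",
 *			.max_speed_hz	 = 3125000,
 *			.bus_num	 = 0,
 *			.chip_select	 = 0,
 *			.controller_data = &board_spi_chip,
 *		},
 *	};
 */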
#ifdef CONFIG_DEBUG_FS
static int spi_show_regs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
#define SPI_REGS_BUFSIZE	1024
static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct dw_spi *dws;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	dws = file->private_data;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"MRST SPI0 registers:\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, ser));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, sr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, imr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, isr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}
static const struct file_operations mrst_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= spi_show_regs_open,
	.read		= spi_show_regs,
};
static int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
	return 0;
}
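
/*
 * With debugfs mounted at its usual /sys/kernel/debug location, the register
 * dump created above can be read with e.g.:
 *
 *	cat /sys/kernel/debug/mrst_spi/registers
 */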
static void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
	if (dws->debugfs)
		debugfs_remove_recursive(dws->debugfs);
}
#else
static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */
static void wait_till_not_busy(struct dw_spi *dws)
{
	unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);

	while (time_before(jiffies, end)) {
		if (!(dw_readw(dws, sr) & SR_BUSY))
			return;
	}
	dev_err(&dws->master->dev,
		"DW SPI: Status keeps busy for 1000us after a read/write!\n");
}
static void flush(struct dw_spi *dws)
{
	while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		dw_readw(dws, dr);

	wait_till_not_busy(dws);
}
static int null_writer(struct dw_spi *dws)
{
	u8 n_bytes = dws->n_bytes;

	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;
	dw_writew(dws, dr, 0);
	dws->tx += n_bytes;

	wait_till_not_busy(dws);
	return 1;
}
static int null_reader(struct dw_spi *dws)
{
	u8 n_bytes = dws->n_bytes;

	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		dw_readw(dws, dr);
		dws->rx += n_bytes;
	}
	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}
static int u8_writer(struct dw_spi *dws)
{
	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;

	dw_writew(dws, dr, *(u8 *)(dws->tx));
	++dws->tx;

	wait_till_not_busy(dws);
	return 1;
}
static int u8_reader(struct dw_spi *dws)
{
	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		*(u8 *)(dws->rx) = dw_readw(dws, dr);
		++dws->rx;
	}

	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}
static int u16_writer(struct dw_spi *dws)
{
	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;

	dw_writew(dws, dr, *(u16 *)(dws->tx));
	dws->tx += 2;

	wait_till_not_busy(dws);
	return 1;
}
static int u16_reader(struct dw_spi *dws)
{
	u16 temp;

	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		temp = dw_readw(dws, dr);
		*(u16 *)(dws->rx) = temp;
		dws->rx += 2;
	}

	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}
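
/*
 * Convention of the PIO helpers above (as read from this file): a writer
 * returns 1 after queueing one word and 0 once the TX FIFO is full or the
 * transmit buffer is exhausted; a reader drains whatever the RX FIFO holds
 * and returns non-zero only when the whole receive buffer has been filled.
 */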
static void *next_transfer(struct dw_spi *dws)
{
	struct spi_message *msg = dws->cur_msg;
	struct spi_transfer *trans = dws->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		dws->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}
/*
 * Note: the protocol driver first prepares DMA-capable memory; this function
 * only needs to translate the virtual address to a physical one.
 */
static int map_dma_buffers(struct dw_spi *dws)
{
	if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
		|| !dws->cur_chip->enable_dma)
		return 0;

	if (dws->cur_transfer->tx_dma)
		dws->tx_dma = dws->cur_transfer->tx_dma;

	if (dws->cur_transfer->rx_dma)
		dws->rx_dma = dws->cur_transfer->rx_dma;

	return 1;
}
/* Caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct dw_spi *dws)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;

	spin_lock_irqsave(&dws->lock, flags);
	msg = dws->cur_msg;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->prev_chip = dws->cur_chip;
	dws->cur_chip = NULL;
	queue_work(dws->workqueue, &dws->pump_messages);
	spin_unlock_irqrestore(&dws->lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	if (!last_transfer->cs_change && dws->cs_control)
		dws->cs_control(MRST_SPI_DEASSERT);

	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);
}
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	/* Stop and reset hw */
	flush(dws);
	spi_enable_chip(dws, 0);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&dws->pump_transfers);
}
static void transfer_complete(struct dw_spi *dws)
{
	/* Update total bytes transferred; return count is actual bytes read */
	dws->cur_msg->actual_length += dws->len;

	/* Move to next transfer */
	dws->cur_msg->state = next_transfer(dws);

	/* Handle end of message */
	if (dws->cur_msg->state == DONE_STATE) {
		dws->cur_msg->status = 0;
		giveback(dws);
	} else
		tasklet_schedule(&dws->pump_transfers);
}
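
/*
 * Interrupt-mode note (a reading of the handler below, not of the databook):
 * each TX-empty interrupt refills the FIFO by at most int_level entries
 * (half the FIFO depth), and TXEI is unmasked again only while transmit data
 * remains, so the handler re-arms itself exactly as long as needed.
 */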
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status, irq_mask = 0x3f;
	u32 int_level = dws->fifo_len / 2;
	u32 left;

	irq_status = dw_readw(dws, isr) & irq_mask;
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readw(dws, txoicr);
		dw_readw(dws, rxoicr);
		dw_readw(dws, rxuicr);
		int_error_stop(dws, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);

		left = (dws->tx_end - dws->tx) / dws->n_bytes;
		left = (left > int_level) ? int_level : left;

		while (left--)
			dws->write(dws);
		dws->read(dws);

		/* Re-enable the IRQ if there is still data left to tx */
		if (dws->tx_end > dws->tx)
			spi_umask_intr(dws, SPI_INT_TXEI);
		else
			transfer_complete(dws);
	}

	return IRQ_HANDLED;
}
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct dw_spi *dws = dev_id;
	u16 irq_status, irq_mask = 0x3f;

	irq_status = dw_readw(dws, isr) & irq_mask;
	if (!irq_status)
		return IRQ_NONE;

	if (!dws->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}
/* Must be called inside pump_transfers() */
static void poll_transfer(struct dw_spi *dws)
{
	while (dws->write(dws))
		dws->read(dws);

	transfer_complete(dws);
}
static void dma_transfer(struct dw_spi *dws, int cs_change)
{
}
static void pump_transfers(unsigned long data)
{
	struct dw_spi *dws = (struct dw_spi *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct spi_device *spi = NULL;
	struct chip_data *chip = NULL;
	u8 bits = 0;
	u8 imask = 0;
	u8 cs_change = 0;
	u16 txint_level = 0;
	u16 clk_div = 0;
	u32 speed = 0;
	u32 cr0 = 0;

	/* Get current state information */
	message = dws->cur_msg;
	transfer = dws->cur_transfer;
	chip = dws->cur_chip;
	spi = message->spi;

	if (unlikely(!chip->clk_div))
		chip->clk_div = dws->max_freq / chip->speed_hz;
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		goto early_exit;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		goto early_exit;
	}

	/* Delay if requested at end of transfer */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	}
	dws->n_bytes = chip->n_bytes;
	dws->dma_width = chip->dma_width;
	dws->cs_control = chip->cs_control;

	dws->rx_dma = transfer->rx_dma;
	dws->tx_dma = transfer->tx_dma;
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->write = dws->tx ? chip->write : null_writer;
	dws->read = dws->rx ? chip->read : null_reader;
	dws->cs_change = transfer->cs_change;
	dws->len = dws->cur_transfer->len;
	if (chip != dws->prev_chip)
		cs_change = 1;

	cr0 = chip->cr0;
	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz) {
		speed = chip->speed_hz;

		if (transfer->speed_hz != speed) {
			speed = transfer->speed_hz;
			if (speed > dws->max_freq) {
				printk(KERN_ERR "MRST SPI0: unsupported "
					"freq: %dHz\n", speed);
				message->status = -EIO;
				goto early_exit;
			}

			/* clk_div doesn't support odd numbers */
			clk_div = dws->max_freq / speed;
			clk_div = (clk_div + 1) & 0xfffe;

			chip->speed_hz = speed;
			chip->clk_div = clk_div;
		}
	}
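
	/*
	 * Worked example with illustrative figures (not from any datasheet):
	 * for dws->max_freq = 50000000 and a requested speed of 7000000 Hz,
	 * clk_div = 50000000 / 7000000 = 7, rounded up to the even value 8
	 * by the masking above, giving an actual SCK of 50 MHz / 8 = 6.25 MHz.
	 */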
	if (transfer->bits_per_word) {
		bits = transfer->bits_per_word;

		switch (bits) {
		case 8:
			dws->n_bytes = 1;
			dws->dma_width = 1;
			dws->read = (dws->read != null_reader) ?
					u8_reader : null_reader;
			dws->write = (dws->write != null_writer) ?
					u8_writer : null_writer;
			break;
		case 16:
			dws->n_bytes = 2;
			dws->dma_width = 2;
			dws->read = (dws->read != null_reader) ?
					u16_reader : null_reader;
			dws->write = (dws->write != null_writer) ?
					u16_writer : null_writer;
			break;
		default:
			printk(KERN_ERR "MRST SPI0: unsupported bits: "
				"%db\n", bits);
			message->status = -EIO;
			goto early_exit;
		}

		cr0 = (bits - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);
	}
	message->state = RUNNING_STATE;
	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (dws->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}
	/* Check if current transfer is a DMA transaction */
	dws->dma_mapped = map_dma_buffers(dws);

	/* We only need to set the TXEI IRQ, as TX/RX always happen in sync */
	if (!dws->dma_mapped && !chip->poll_mode) {
		int templen = dws->len / dws->n_bytes;
		txint_level = dws->fifo_len / 2;
		txint_level = (templen > txint_level) ? txint_level : templen;

		imask |= SPI_INT_TXEI;
		dws->transfer_handler = interrupt_transfer;
	}
	/*
	 * Reprogram registers only if
	 *	1. chip select changes
	 *	2. clk_div is changed
	 *	3. control value changes
	 */
	if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) {
		spi_enable_chip(dws, 0);

		if (dw_readw(dws, ctrl0) != cr0)
			dw_writew(dws, ctrl0, cr0);

		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
		spi_chip_sel(dws, spi->chip_select);

		/* Set the interrupt mask; for poll mode just disable all interrupts */
		spi_mask_intr(dws, 0xff);
		if (imask)
			spi_umask_intr(dws, imask);
		if (txint_level)
			dw_writew(dws, txfltr, txint_level);

		spi_enable_chip(dws, 1);
		if (cs_change)
			dws->prev_chip = chip;
	}

	if (dws->dma_mapped)
		dma_transfer(dws, cs_change);

	if (chip->poll_mode)
		poll_transfer(dws);

	return;

early_exit:
	giveback(dws);
	return;
}
static void pump_messages(struct work_struct *work)
{
	struct dw_spi *dws =
		container_of(work, struct dw_spi, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&dws->lock, flags);
	if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
		dws->busy = 0;
		spin_unlock_irqrestore(&dws->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (dws->cur_msg) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return;
	}

	/* Extract head of queue */
	dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
	list_del_init(&dws->cur_msg->queue);

	/* Initial message state */
	dws->cur_msg->state = START_STATE;
	dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);
	dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&dws->pump_transfers);

	dws->busy = 1;
	spin_unlock_irqrestore(&dws->lock, flags);
}
/* spi_device uses this to queue its spi_msg */
static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct dw_spi *dws = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&dws->lock, flags);

	if (dws->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	list_add_tail(&msg->queue, &dws->queue);

	if (dws->run == QUEUE_RUNNING && !dws->busy) {
		if (dws->cur_transfer || dws->cur_msg)
			queue_work(dws->workqueue,
					&dws->pump_messages);
		else {
			/* If no other data transaction is in flight, just go */
			spin_unlock_irqrestore(&dws->lock, flags);
			pump_messages(&dws->pump_messages);
			return 0;
		}
	}

	spin_unlock_irqrestore(&dws->lock, flags);
	return 0;
}
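
/*
 * From a protocol driver's point of view (illustrative only, not part of
 * this file), the SPI core reaches dw_spi_transfer() via master->transfer
 * when a message is submitted through the usual API, e.g.:
 *
 *	struct spi_transfer t = {
 *		.tx_buf	= txbuf,
 *		.rx_buf	= rxbuf,
 *		.len	= len,
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	ret = spi_sync(spi, &m);
 */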
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;

	if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
		return -EINVAL;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	/*
	 * Protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;

		chip->rx_threshold = 0;
		chip->tx_threshold = 0;

		chip->enable_dma = chip_info->enable_dma;
	}

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->dma_width = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->dma_width = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else {
		/* Never take the >16b case for the MRST SPI controller */
		dev_err(&spi->dev, "invalid wordsize\n");
		return -EINVAL;
	}
	chip->bits_per_word = spi->bits_per_word;

	if (!spi->max_speed_hz) {
		dev_err(&spi->dev, "No max speed HZ parameter\n");
		return -EINVAL;
	}
	chip->speed_hz = spi->max_speed_hz;

	chip->tmode = 0; /* Tx & Rx */
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	chip->cr0 = (chip->bits_per_word - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);

	spi_set_ctldata(spi, chip);
	return 0;
}
static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
}
static int __devinit init_queue(struct dw_spi *dws)
{
	INIT_LIST_HEAD(&dws->queue);
	spin_lock_init(&dws->lock);

	dws->run = QUEUE_STOPPED;
	dws->busy = 0;

	tasklet_init(&dws->pump_transfers,
			pump_transfers, (unsigned long)dws);

	INIT_WORK(&dws->pump_messages, pump_messages);
	dws->workqueue = create_singlethread_workqueue(
			dev_name(dws->master->dev.parent));
	if (dws->workqueue == NULL)
		return -EBUSY;

	return 0;
}
static int start_queue(struct dw_spi *dws)
{
	unsigned long flags;

	spin_lock_irqsave(&dws->lock, flags);

	if (dws->run == QUEUE_RUNNING || dws->busy) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return -EBUSY;
	}

	dws->run = QUEUE_RUNNING;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->cur_chip = NULL;
	dws->prev_chip = NULL;
	spin_unlock_irqrestore(&dws->lock, flags);

	queue_work(dws->workqueue, &dws->pump_messages);

	return 0;
}
static int stop_queue(struct dw_spi *dws)
{
	unsigned long flags;
	unsigned limit = 50;
	int status = 0;

	spin_lock_irqsave(&dws->lock, flags);
	dws->run = QUEUE_STOPPED;
	while (!list_empty(&dws->queue) && dws->busy && limit--) {
		spin_unlock_irqrestore(&dws->lock, flags);
		msleep(10);
		spin_lock_irqsave(&dws->lock, flags);
	}

	if (!list_empty(&dws->queue) || dws->busy)
		status = -EBUSY;
	spin_unlock_irqrestore(&dws->lock, flags);

	return status;
}
static int destroy_queue(struct dw_spi *dws)
{
	int status;

	status = stop_queue(dws);
	if (status != 0)
		return status;
	destroy_workqueue(dws->workqueue);
	return 0;
}
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_mask_intr(dws, 0xff);
	spi_enable_chip(dws, 1);
	flush(dws);

	/*
	 * Try to detect the FIFO depth if not set by interface driver;
	 * the depth could be from 2 to 256 per the HW spec.
	 */
	if (!dws->fifo_len) {
		u32 fifo;
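
		/*
		 * Probe note (inferred from the loop below, not from the
		 * databook): TXFTLR only latches threshold values smaller
		 * than the FIFO depth, so the first value that fails to
		 * read back marks the depth.
		 */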
		for (fifo = 2; fifo <= 257; fifo++) {
			dw_writew(dws, txfltr, fifo);
			if (fifo != dw_readw(dws, txfltr))
				break;
		}

		dws->fifo_len = (fifo == 257) ? 0 : fifo;
		dw_writew(dws, txfltr, 0);
	}
}
int __devinit dw_spi_add_host(struct dw_spi *dws)
{
	struct spi_master *master;
	int ret;

	BUG_ON(dws == NULL);

	master = spi_alloc_master(dws->parent_dev, 0);
	if (!master) {
		ret = -ENOMEM;
		goto exit;
	}

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->prev_chip = NULL;
	dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
			"dw_spi", dws);
	if (ret < 0) {
		dev_err(&master->dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->cleanup = dw_spi_cleanup;
	master->setup = dw_spi_setup;
	master->transfer = dw_spi_transfer;

	/* Basic HW init */
	spi_hw_init(dws);

	/* Initial and start queue */
	ret = init_queue(dws);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_disable_hw;
	}
	ret = start_queue(dws);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_disable_hw;
	}

	spi_master_set_devdata(master, dws);
	ret = spi_register_master(master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_queue_alloc;
	}

	mrst_spi_debugfs_init(dws);
	return 0;

err_queue_alloc:
	destroy_queue(dws);
err_disable_hw:
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, dws);
err_free_master:
	spi_master_put(master);
exit:
	return ret;
}
EXPORT_SYMBOL(dw_spi_add_host);
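
/*
 * Rough sketch of an interface (bus glue) driver feeding this core
 * (illustrative only; the resource names, base address and clock figure
 * are hypothetical, while the field names follow struct dw_spi from
 * <linux/spi/dw_spi.h>):
 *
 *	dws->regs = ioremap_nocache(mem->start, resource_size(mem));
 *	dws->paddr = mem->start;
 *	dws->parent_dev = &pdev->dev;
 *	dws->bus_num = 0;
 *	dws->num_cs = 4;
 *	dws->max_freq = 25000000;	// SSI input clock in Hz
 *	dws->irq = irq;
 *
 *	ret = dw_spi_add_host(dws);
 */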
void __devexit dw_spi_remove_host(struct dw_spi *dws)
{
	int status = 0;

	if (!dws)
		return;
	mrst_spi_debugfs_remove(dws);

	/* Remove the queue */
	status = destroy_queue(dws);
	if (status != 0)
		dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
			"complete, message memory not freed\n");

	spi_enable_chip(dws, 0);
	free_irq(dws->irq, dws);

	/* Disconnect from the SPI framework */
	spi_unregister_master(dws->master);
}
EXPORT_SYMBOL(dw_spi_remove_host);
int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = stop_queue(dws);
	if (ret)
		return ret;
	spi_enable_chip(dws, 0);
	return 0;
}
EXPORT_SYMBOL(dw_spi_suspend_host);
int dw_spi_resume_host(struct dw_spi *dws)
{
	int ret;

	spi_hw_init(dws);
	ret = start_queue(dws);
	if (ret)
		dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
	return ret;
}
EXPORT_SYMBOL(dw_spi_resume_host);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");