/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

#define START_STATE	((void *)0)
#define RUNNING_STATE	((void *)1)
#define DONE_STATE	((void *)2)
#define ERROR_STATE	((void *)-1)
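
/*
 * These values are stored in spi_message->state to drive the transfer state
 * machine: dw_spi_transfer_one_message() starts a message in START_STATE,
 * next_transfer() advances it to RUNNING_STATE or DONE_STATE, and
 * int_error_stop() forces ERROR_STATE so pump_transfers() can give the
 * message back with -EIO.
 */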

/* Slave spi_dev related */
struct chip_data {
	u16 cr0;
	u8 cs;			/* chip select pin */
	u8 n_bytes;		/* current is a 1/2/4 byte op */
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u32 dma_width;
	u32 rx_threshold;
	u32 tx_threshold;
	u8 enable_dma;
	u8 bits_per_word;
	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	void (*cs_control)(u32 command);
};

#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE	1024
static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dw_spi *dws = file->private_data;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"%s registers:\n", dev_name(&dws->master->dev));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}

static const struct file_operations dw_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= dw_spi_show_regs,
	.llseek		= default_llseek,
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	dws->debugfs = debugfs_create_dir("dw_spi", NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &dw_spi_regs_ops);
	return 0;
}
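
/*
 * With debugfs mounted (typically at /sys/kernel/debug) the register dump
 * above can be read with, for example:
 *
 *	cat /sys/kernel/debug/dw_spi/registers
 *
 * The "dw_spi" and "registers" names come from dw_spi_debugfs_init(); the
 * mount point is only the conventional one and may differ on a given system.
 */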

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
	tx_room = dws->fifo_len - dw_readw(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: we thought of using
	 * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but that
	 * doesn't cover the data already out of the tx/rx fifos and inside
	 * the shift registers, so a software-side limit is applied instead.
	 */
	rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}
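
/*
 * Worked example for the rxtx_gap bound above (illustrative numbers, not
 * taken from any particular board): with fifo_len = 16 and n_bytes = 1,
 * if 40 bytes are still to be received but only 32 remain to be sent,
 * rxtx_gap = (40 - 32) / 1 = 8, so tx is limited to 16 - 8 = 8 new entries
 * even if the tx fifo itself has more room, keeping tx from running further
 * ahead of rx than the fifo can absorb.
 */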

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;

	return min_t(u32, rx_left, dw_readw(dws, DW_SPI_RXFLR));
}

static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u16 txw = 0;

	while (max--) {
		/* Set the tx word if the transfer's original "tx" is not null */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);
		}
		dw_writew(dws, DW_SPI_DR, txw);
		dws->tx += dws->n_bytes;
	}
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u16 rxw;

	while (max--) {
		rxw = dw_readw(dws, DW_SPI_DR);
		/* Care rx only if the transfer's original "rx" is not null */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;
		}
		dws->rx += dws->n_bytes;
	}
}
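
/*
 * Note on dw_writer()/dw_reader() above: DW_SPI_DR is written and read for
 * every fifo entry even when the transfer carries no tx_buf or rx_buf. The
 * dummy 0x0 write keeps the clock running for rx-only transfers, and the
 * unused read drains the rx fifo for tx-only ones; the "original pointer is
 * not null" checks (tx_end - len, rx_end - len) are what skip the actual
 * buffer accesses.
 */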

static void *next_transfer(struct dw_spi *dws)
{
	struct spi_message *msg = dws->cur_msg;
	struct spi_transfer *trans = dws->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		dws->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	}

	return DONE_STATE;
}

/*
 * Note: the protocol driver prepares DMA-capable memory first; this function
 * only needs to translate the virtual addresses to physical ones.
 */
static int map_dma_buffers(struct dw_spi *dws)
{
	if (!dws->cur_msg->is_dma_mapped
		|| !dws->cur_chip->enable_dma
		|| !dws->dma_ops)
		return 0;

	if (dws->cur_transfer->tx_dma)
		dws->tx_dma = dws->cur_transfer->tx_dma;

	if (dws->cur_transfer->rx_dma)
		dws->rx_dma = dws->cur_transfer->rx_dma;

	return 1;
}
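
/*
 * map_dma_buffers() returns 1 only when the message was DMA-mapped by the
 * core and the chip has enable_dma set; pump_transfers() stores the result
 * in dws->dma_mapped and hands the transfer to dws->dma_ops->dma_transfer()
 * in that case, otherwise the transfer falls back to interrupt or poll mode.
 */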

/* Caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct dw_spi *dws)
{
	struct spi_transfer *last_transfer;
	struct spi_message *msg;

	msg = dws->cur_msg;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->prev_chip = dws->cur_chip;
	dws->cur_chip = NULL;

	last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
					transfer_list);

	if (!last_transfer->cs_change)
		spi_chip_sel(dws, msg->spi, 0);

	spi_finalize_current_message(dws->master);
}

static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	spi_enable_chip(dws, 0);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&dws->pump_transfers);
}

void dw_spi_xfer_done(struct dw_spi *dws)
{
	/* Update the total count of bytes actually transferred */
	dws->cur_msg->actual_length += dws->len;

	/* Move to next transfer */
	dws->cur_msg->state = next_transfer(dws);

	/* Handle end of message */
	if (dws->cur_msg->state == DONE_STATE) {
		dws->cur_msg->status = 0;
		giveback(dws);
	} else
		tasklet_schedule(&dws->pump_transfers);
}
EXPORT_SYMBOL_GPL(dw_spi_xfer_done);

static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readw(dws, DW_SPI_ISR);

	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readw(dws, DW_SPI_TXOICR);
		dw_readw(dws, DW_SPI_RXOICR);
		dw_readw(dws, DW_SPI_RXUICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_spi_xfer_done(dws);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Enable TX irq always, it will be disabled when RX finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct dw_spi *dws = dev_id;
	u16 irq_status = dw_readw(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	if (!dws->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

/* Must be called inside pump_transfers() */
static void poll_transfer(struct dw_spi *dws)
{
	do {
		dw_writer(dws);
		dw_reader(dws);
		cpu_relax();
	} while (dws->rx_end > dws->rx);

	dw_spi_xfer_done(dws);
}

static void pump_transfers(unsigned long data)
{
	struct dw_spi *dws = (struct dw_spi *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct spi_device *spi = NULL;
	struct chip_data *chip = NULL;
	u8 bits = 0;
	u8 imask = 0;
	u8 cs_change = 0;
	u16 txint_level = 0;
	u16 clk_div = 0;
	u32 speed = 0;
	u32 cr0 = 0;

	/* Get current state information */
	message = dws->cur_msg;
	transfer = dws->cur_transfer;
	chip = dws->cur_chip;
	spi = message->spi;

	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		goto early_exit;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		goto early_exit;
	}

	/* Delay if requested at end of transfer */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	}

	dws->n_bytes = chip->n_bytes;
	dws->dma_width = chip->dma_width;
	dws->cs_control = chip->cs_control;

	dws->rx_dma = transfer->rx_dma;
	dws->tx_dma = transfer->tx_dma;
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->len = dws->cur_transfer->len;
	if (chip != dws->prev_chip)
		cs_change = 1;

	cr0 = chip->cr0;

	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz) {
		speed = chip->speed_hz;

		if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
			speed = transfer->speed_hz;

			/* clk_div doesn't support odd numbers */
			clk_div = dws->max_freq / speed;
			clk_div = (clk_div + 1) & 0xfffe;

			chip->speed_hz = speed;
			chip->clk_div = clk_div;
		}
	}
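
	/*
	 * Worked example for the divider rounding above (numbers purely
	 * illustrative): with max_freq = 50000000 and a requested speed of
	 * 7000000, clk_div = 50000000 / 7000000 = 7, then (7 + 1) & 0xfffe = 8,
	 * so the odd result is rounded up to the next even divider and the
	 * bus runs at 50 MHz / 8 = 6.25 MHz; an already even divider is left
	 * unchanged by the same expression.
	 */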

	if (transfer->bits_per_word) {
		bits = transfer->bits_per_word;
		dws->n_bytes = dws->dma_width = bits >> 3;
		cr0 = (bits - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);
	}
	message->state = RUNNING_STATE;

	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (dws->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	/* Check if current transfer is a DMA transaction */
	dws->dma_mapped = map_dma_buffers(dws);

	/*
	 * Interrupt mode:
	 * only the TXEI IRQ needs to be set, as TX/RX always happen in step.
	 */
	if (!dws->dma_mapped && !chip->poll_mode) {
		int templen = dws->len / dws->n_bytes;

		txint_level = dws->fifo_len / 2;
		txint_level = (templen > txint_level) ? txint_level : templen;

		imask |= SPI_INT_TXEI | SPI_INT_TXOI |
			 SPI_INT_RXUI | SPI_INT_RXOI;
		dws->transfer_handler = interrupt_transfer;
	}
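
	/*
	 * Worked example for the TX threshold above (illustrative values):
	 * with fifo_len = 16 and a 10-byte transfer at n_bytes = 1,
	 * templen = 10 and fifo_len / 2 = 8, so txint_level = min(8, 10) = 8;
	 * a transfer shorter than half the fifo would use its own length
	 * instead, so the TXEI interrupt is never armed deeper than the data
	 * that remains to be sent.
	 */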

	/*
	 * Reprogram registers only if
	 *	1. chip select changes
	 *	2. clk_div is changed
	 *	3. control value changes
	 */
	if (dw_readw(dws, DW_SPI_CTRL0) != cr0 || cs_change || clk_div || imask) {
		spi_enable_chip(dws, 0);

		if (dw_readw(dws, DW_SPI_CTRL0) != cr0)
			dw_writew(dws, DW_SPI_CTRL0, cr0);

		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
		spi_chip_sel(dws, spi, 1);

		/* Set the interrupt mask, for poll mode just disable all int */
		spi_mask_intr(dws, 0xff);
		if (imask)
			spi_umask_intr(dws, imask);
		if (txint_level)
			dw_writew(dws, DW_SPI_TXFLTR, txint_level);

		spi_enable_chip(dws, 1);
		if (cs_change)
			dws->prev_chip = chip;
	}

	if (dws->dma_mapped)
		dws->dma_ops->dma_transfer(dws, cs_change);

	if (chip->poll_mode)
		poll_transfer(dws);

	return;

early_exit:
	giveback(dws);
}
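
/*
 * Note on the "Reprogram registers only if" block in pump_transfers() above:
 * when none of its conditions hold (same chip as the previous transfer, no
 * clock change, and poll or DMA mode so imask stays 0), the CTRL0 and clock
 * writes and the disable/enable cycle are skipped entirely and the transfer
 * starts with the controller's existing configuration.
 */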

static int dw_spi_transfer_one_message(struct spi_master *master,
		struct spi_message *msg)
{
	struct dw_spi *dws = spi_master_get_devdata(master);

	dws->cur_msg = msg;
	/* Initial message state */
	dws->cur_msg->state = START_STATE;
	dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);
	dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);

	/* Launch transfers */
	tasklet_schedule(&dws->pump_transfers);

	return 0;
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	int ret;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	/*
	 * Protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;

		chip->rx_threshold = 0;
		chip->tx_threshold = 0;

		chip->enable_dma = chip_info->enable_dma;
	}

	if (spi->bits_per_word == 8) {
		chip->n_bytes = 1;
		chip->dma_width = 1;
	} else if (spi->bits_per_word == 16) {
		chip->n_bytes = 2;
		chip->dma_width = 2;
	}
	chip->bits_per_word = spi->bits_per_word;

	if (!spi->max_speed_hz) {
		dev_err(&spi->dev, "No max speed HZ parameter\n");
		return -EINVAL;
	}

	chip->tmode = 0; /* Tx & Rx */
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	chip->cr0 = (chip->bits_per_word - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);
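
	/*
	 * Example of the resulting CTRL0 value, assuming the usual field
	 * offsets from spi-dw.h (FRF at bit 4, mode at bit 6, TMOD at bit 8)
	 * and SSI_MOTO_SPI == 0: an 8-bit, SPI mode 0 device in the default
	 * TR transfer mode yields cr0 = (8 - 1) = 0x07, i.e. only the data
	 * frame size field is non-zero.
	 */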

	if (spi->mode & SPI_LOOP)
		chip->cr0 |= 1 << SPI_SRL_OFFSET;

	if (gpio_is_valid(spi->cs_gpio)) {
		ret = gpio_direction_output(spi->cs_gpio,
				!(spi->mode & SPI_CS_HIGH));
		if (ret)
			return ret;
	}

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_mask_intr(dws, 0xff);
	spi_enable_chip(dws, 1);

	/*
	 * Try to detect the FIFO depth if not set by interface driver,
	 * the depth could be from 2 to 256 from HW spec
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writew(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
				break;
		}
		dw_writew(dws, DW_SPI_TXFLTR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}
}
*dev
, struct dw_spi
*dws
)
638 struct spi_master
*master
;
643 master
= spi_alloc_master(dev
, 0);
647 dws
->master
= master
;
648 dws
->type
= SSI_MOTO_SPI
;
649 dws
->prev_chip
= NULL
;
651 dws
->dma_addr
= (dma_addr_t
)(dws
->paddr
+ 0x60);
652 snprintf(dws
->name
, sizeof(dws
->name
), "dw_spi%d", dws
->bus_num
);
654 ret
= devm_request_irq(dev
, dws
->irq
, dw_spi_irq
, IRQF_SHARED
,
657 dev_err(&master
->dev
, "can not get IRQ\n");
658 goto err_free_master
;
661 master
->mode_bits
= SPI_CPOL
| SPI_CPHA
| SPI_LOOP
;
662 master
->bits_per_word_mask
= SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
663 master
->bus_num
= dws
->bus_num
;
664 master
->num_chipselect
= dws
->num_cs
;
665 master
->setup
= dw_spi_setup
;
666 master
->cleanup
= dw_spi_cleanup
;
667 master
->transfer_one_message
= dw_spi_transfer_one_message
;
668 master
->max_speed_hz
= dws
->max_freq
;
669 master
->dev
.of_node
= dev
->of_node
;
672 spi_hw_init(dev
, dws
);
674 if (dws
->dma_ops
&& dws
->dma_ops
->dma_init
) {
675 ret
= dws
->dma_ops
->dma_init(dws
);
677 dev_warn(dev
, "DMA init failed\n");
682 tasklet_init(&dws
->pump_transfers
, pump_transfers
, (unsigned long)dws
);
684 spi_master_set_devdata(master
, dws
);
685 ret
= devm_spi_register_master(dev
, master
);
687 dev_err(&master
->dev
, "problem registering spi master\n");
691 dw_spi_debugfs_init(dws
);
695 if (dws
->dma_ops
&& dws
->dma_ops
->dma_exit
)
696 dws
->dma_ops
->dma_exit(dws
);
697 spi_enable_chip(dws
, 0);
699 spi_master_put(master
);
702 EXPORT_SYMBOL_GPL(dw_spi_add_host
);

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_master_suspend(dws->master);
	if (ret)
		return ret;
	spi_enable_chip(dws, 0);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
	int ret;

	spi_hw_init(&dws->master->dev, dws);
	ret = spi_master_resume(dws->master);
	if (ret)
		dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");