/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 * Copyright (C) 2013, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>

#include <asm/delay.h>

#include "spi-pxa2xx.h"
MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define TIMOUT_DFLT		1000
/*
 * For testing SSCR1 changes that require SSP restart, basically
 * everything except the service and interrupt enables. The PXA270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in this
 * list, but the PXA255 developer manual says all bits without really
 * meaning the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
#define LPSS_RX_THRESH_DFLT	64
#define LPSS_TX_LOTHRESH_DFLT	160
#define LPSS_TX_HITHRESH_DFLT	224

/* Offset from drv_data->lpss_base */
#define GENERAL_REG				0x08
#define GENERAL_REG_RXTO_HOLDOFF_DISABLE	BIT(24)
#define SPI_CS_CONTROL				0x18
#define SPI_CS_CONTROL_SW_MODE			BIT(0)
#define SPI_CS_CONTROL_CS_HIGH			BIT(1)
static bool is_lpss_ssp(const struct driver_data *drv_data)
{
	return drv_data->ssp_type == LPSS_SSP;
}
/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}
static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}
/**
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
	unsigned offset = 0x400;
	u32 value, orig;

	if (!is_lpss_ssp(drv_data))
		return;

	/*
	 * Perform auto-detection of the LPSS SSP private registers. They
	 * can be either at 1k or 2k offset from the base address.
	 */
	orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);

	value = orig | SPI_CS_CONTROL_SW_MODE;
	writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
	value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
	if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
		offset = 0x800;
		goto detection_done;
	}

	value &= ~SPI_CS_CONTROL_SW_MODE;
	writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
	value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
	if (value != orig) {
		offset = 0x800;
		goto detection_done;
	}

detection_done:
	/* Now set the LPSS base */
	drv_data->lpss_base = drv_data->ioaddr + offset;

	/* Enable software chip select control */
	value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);

	/* Enable multiblock DMA transfers */
	if (drv_data->master_info->enable_dma) {
		__lpss_ssp_write_priv(drv_data, SSP_REG, 1);

		value = __lpss_ssp_read_priv(drv_data, GENERAL_REG);
		value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
		__lpss_ssp_write_priv(drv_data, GENERAL_REG, value);
	}
}
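
/*
 * lpss_ssp_cs_control - drive the LPSS software chip select. SPI_CS_CONTROL
 * was put into software mode by lpss_ssp_setup() above, so asserting the
 * (active low) chip select means clearing SPI_CS_CONTROL_CS_HIGH and
 * deasserting means setting it again.
 */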
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
	u32 value;

	if (!is_lpss_ssp(drv_data))
		return;

	value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
	if (enable)
		value &= ~SPI_CS_CONTROL_CS_HIGH;
	else
		value |= SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
}
static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP) {
		write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
		return;
	}

	lpss_ssp_cs_control(drv_data, true);
}
static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
		return;
	}

	lpss_ssp_cs_control(drv_data, false);
}
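
/*
 * pxa2xx_spi_flush - drain the RX FIFO and wait for the SSP to go idle.
 * The wait is bounded by a loops_per_jiffy based limit; the remaining
 * limit is returned, so a return value of zero means the flush timed out.
 */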
int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	void __iomem *reg = drv_data->ioaddr;

	do {
		while (read_SSSR(reg) & SSSR_RNE) {
			read_SSDR(reg);
		}
	} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}
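
/*
 * PIO helpers. The null_* variants handle the unused direction of a
 * half-duplex transfer: they clock out zeroes or discard received words.
 * The uN_* variants move 8/16/32-bit words between the buffers and the
 * FIFO. Writers return 0 once the TX FIFO is full or the TX buffer is
 * exhausted; readers return true once the whole RX buffer has been filled.
 */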
static int null_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(0, reg);
	drv_data->tx += n_bytes;

	return 1;
}
static int null_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		read_SSDR(reg);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}
static int u8_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u8 *)(drv_data->tx), reg);
	++drv_data->tx;

	return 1;
}
static int u8_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = read_SSDR(reg);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}
static int u16_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u16 *)(drv_data->tx), reg);
	drv_data->tx += 2;

	return 1;
}
static int u16_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}
static int u32_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u32 *)(drv_data->tx), reg);
	drv_data->tx += 4;

	return 1;
}
static int u32_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}
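
/*
 * pxa2xx_spi_next_transfer - advance cur_transfer to the next entry of the
 * current message and report RUNNING_STATE, or DONE_STATE when the last
 * transfer has been completed.
 */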
void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}
/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	struct spi_message *msg;

	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	drv_data->cur_chip = NULL;
	spi_finalize_current_message(drv_data->master);
}
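
/*
 * reset_sccr1 - disable the interrupt sources enabled for the current
 * transfer (int_cr1) and restore the chip's FIFO threshold bits.
 */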
static void reset_sccr1(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

	sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
	sccr1_reg &= ~SSCR1_RFT;
	sccr1_reg |= chip->threshold;
	write_SSCR1(sccr1_reg, reg);
}
static void int_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	pxa2xx_spi_flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}
static void int_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);

	/* Update total byte transferred return count actual bytes read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
			drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = read_SSSR(reg) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		write_SSSR(SSSR_TINT, reg);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, Fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = read_SSCR1(reg);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {

			sccr1_reg &= ~SSCR1_RFT;

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
			case 2:
				bytes_left >>= 1;
			}

			if (bytes_left > RX_THRESH_DFLT)
				bytes_left = RX_THRESH_DFLT;

			sccr1_reg |= SSCR1_RxTresh(bytes_left);
		}
		write_SSCR1(sccr1_reg, reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}
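
/*
 * ssp_int - top level (shared) SSP interrupt handler. Filters out events
 * that cannot be ours (runtime suspended port, powered off port, masked
 * status bits) before handing off to the PIO or DMA transfer handler.
 */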
static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	void __iomem *reg = drv_data->ioaddr;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals so we must first
	 * check whether we are runtime suspended or not. If we are, we
	 * assume that the IRQ was not for us (we shouldn't be runtime
	 * suspended when the interrupt is enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	/*
	 * If the device is not yet in RPM suspended state and we get an
	 * interrupt that is meant for another device, check if status bits
	 * are all set to one. That means that the device is already
	 * powered off.
	 */
	status = read_SSSR(reg);
	if (status == ~0)
		return IRQ_NONE;

	sccr1_reg = read_SSCR1(reg);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	/* Ignore RX timeout interrupt if it is disabled */
	if (!(sccr1_reg & SSCR1_TINTE))
		mask &= ~SSSR_TINT;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev,
			"bad message state in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}
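
/*
 * ssp_get_clk_div - compute the SSCR0 serial clock rate (SCR) field for the
 * requested bit rate. The divider is stored as "divisor - 1" already shifted
 * into the SCR position; PXA25x/CE4100 only have an 8-bit SCR field and an
 * implicit extra divide by two.
 */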
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->max_clk_rate;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
	else
		return ((ssp_clk / rate - 1) & 0xfff) << 8;
}
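
/*
 * pump_transfers - tasklet that sets up the SSP for the current transfer of
 * the current message and starts it, either by mapping the buffers and
 * kicking the DMA engine or by enabling the PIO interrupts. The message is
 * also finalized here once it reaches DONE_STATE or ERROR_STATE.
 */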
static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	void __iomem *reg = drv_data->ioaddr;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check if we can DMA this transfer */
	if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length of %u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		if (printk_ratelimit())
			dev_warn(&message->spi->dev,
				"pump_transfers: DMA disabled for transfer length %ld greater than %d\n",
				(long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bit per word on a per transfer */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = ssp_get_clk_div(drv_data, speed);

		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->read = drv_data->read != null_reader ?
						u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->read = drv_data->read != null_reader ?
						u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->read = drv_data->read != null_reader ?
						u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u32_writer : null_writer;
		}
		/* if bits/word is changed in dma mode, then must check the
		 * thresholds and burst also */
		if (chip->enable_dma) {
			if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
							message->spi,
							bits, &dma_burst,
							&dma_thresh))
				if (printk_ratelimit())
					dev_warn(&message->spi->dev,
						"pump_transfers: DMA burst size reduced to match bits_per_word\n");
		}

		cr0 = clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}

	message->state = RUNNING_STATE;

	drv_data->dma_mapped = 0;
	if (pxa2xx_spi_dma_is_possible(drv_data->len))
		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		pxa2xx_spi_dma_prepare(drv_data, dma_burst);

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		write_SSSR(drv_data->clear_sr, reg);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	if (is_lpss_ssp(drv_data)) {
		if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
			write_SSIRF(chip->lpss_rx_threshold, reg);
		if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
			write_SSITF(chip->lpss_tx_threshold, reg);
	}

	/* see if we need to reload the config registers */
	if ((read_SSCR0(reg) != cr0)
		|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
			(cr1 & SSCR1_CHANGE_MASK)) {

		/* stop the SSP, and update the other bits */
		write_SSCR0(cr0 & ~SSCR0_SSE, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
		/* first set CR1 without interrupt and service enables */
		write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
		/* restart the SSP */
		write_SSCR0(cr0, reg);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	write_SSCR1(cr1, reg);
}
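
/*
 * pxa2xx_spi_transfer_one_message - SPI core entry point. Only records the
 * new message and schedules pump_transfers; the real work happens in the
 * tasklet.
 */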
static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	drv_data->cur_msg = msg;
	/* Initial message state*/
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}
*master
)
829 struct driver_data
*drv_data
= spi_master_get_devdata(master
);
831 /* Disable the SSP now */
832 write_SSCR0(read_SSCR0(drv_data
->ioaddr
) & ~SSCR0_SSE
,
838 static int setup_cs(struct spi_device
*spi
, struct chip_data
*chip
,
839 struct pxa2xx_spi_chip
*chip_info
)
843 if (chip
== NULL
|| chip_info
== NULL
)
846 /* NOTE: setup() can be called multiple times, possibly with
847 * different chip_info, release previously requested GPIO
849 if (gpio_is_valid(chip
->gpio_cs
))
850 gpio_free(chip
->gpio_cs
);
852 /* If (*cs_control) is provided, ignore GPIO chip select */
853 if (chip_info
->cs_control
) {
854 chip
->cs_control
= chip_info
->cs_control
;
858 if (gpio_is_valid(chip_info
->gpio_cs
)) {
859 err
= gpio_request(chip_info
->gpio_cs
, "SPI_CS");
861 dev_err(&spi
->dev
, "failed to request chip select "
862 "GPIO%d\n", chip_info
->gpio_cs
);
866 chip
->gpio_cs
= chip_info
->gpio_cs
;
867 chip
->gpio_cs_inverted
= spi
->mode
& SPI_CS_HIGH
;
869 err
= gpio_direction_output(chip
->gpio_cs
,
870 !chip
->gpio_cs_inverted
);
876 static int setup(struct spi_device
*spi
)
878 struct pxa2xx_spi_chip
*chip_info
= NULL
;
879 struct chip_data
*chip
;
880 struct driver_data
*drv_data
= spi_master_get_devdata(spi
->master
);
881 unsigned int clk_div
;
882 uint tx_thres
, tx_hi_thres
, rx_thres
;
884 if (is_lpss_ssp(drv_data
)) {
885 tx_thres
= LPSS_TX_LOTHRESH_DFLT
;
886 tx_hi_thres
= LPSS_TX_HITHRESH_DFLT
;
887 rx_thres
= LPSS_RX_THRESH_DFLT
;
889 tx_thres
= TX_THRESH_DFLT
;
891 rx_thres
= RX_THRESH_DFLT
;
894 /* Only alloc on first setup */
895 chip
= spi_get_ctldata(spi
);
897 chip
= kzalloc(sizeof(struct chip_data
), GFP_KERNEL
);
900 "failed setup: can't allocate chip data\n");
904 if (drv_data
->ssp_type
== CE4100_SSP
) {
905 if (spi
->chip_select
> 4) {
906 dev_err(&spi
->dev
, "failed setup: "
907 "cs number must not be > 4.\n");
912 chip
->frm
= spi
->chip_select
;
915 chip
->enable_dma
= 0;
916 chip
->timeout
= TIMOUT_DFLT
;
919 /* protocol drivers may change the chip settings, so...
920 * if chip_info exists, use it */
921 chip_info
= spi
->controller_data
;
923 /* chip_info isn't always needed */
926 if (chip_info
->timeout
)
927 chip
->timeout
= chip_info
->timeout
;
928 if (chip_info
->tx_threshold
)
929 tx_thres
= chip_info
->tx_threshold
;
930 if (chip_info
->tx_hi_threshold
)
931 tx_hi_thres
= chip_info
->tx_hi_threshold
;
932 if (chip_info
->rx_threshold
)
933 rx_thres
= chip_info
->rx_threshold
;
934 chip
->enable_dma
= drv_data
->master_info
->enable_dma
;
935 chip
->dma_threshold
= 0;
936 if (chip_info
->enable_loopback
)
937 chip
->cr1
= SSCR1_LBM
;
938 } else if (ACPI_HANDLE(&spi
->dev
)) {
940 * Slave devices enumerated from ACPI namespace don't
941 * usually have chip_info but we still might want to use
944 chip
->enable_dma
= drv_data
->master_info
->enable_dma
;
947 chip
->threshold
= (SSCR1_RxTresh(rx_thres
) & SSCR1_RFT
) |
948 (SSCR1_TxTresh(tx_thres
) & SSCR1_TFT
);
950 chip
->lpss_rx_threshold
= SSIRF_RxThresh(rx_thres
);
951 chip
->lpss_tx_threshold
= SSITF_TxLoThresh(tx_thres
)
952 | SSITF_TxHiThresh(tx_hi_thres
);
954 /* set dma burst and threshold outside of chip_info path so that if
955 * chip_info goes away after setting chip->enable_dma, the
956 * burst and threshold can still respond to changes in bits_per_word */
957 if (chip
->enable_dma
) {
958 /* set up legal burst and threshold for dma */
959 if (pxa2xx_spi_set_dma_burst_and_threshold(chip
, spi
,
961 &chip
->dma_burst_size
,
962 &chip
->dma_threshold
)) {
963 dev_warn(&spi
->dev
, "in setup: DMA burst size reduced "
964 "to match bits_per_word\n");
968 clk_div
= ssp_get_clk_div(drv_data
, spi
->max_speed_hz
);
969 chip
->speed_hz
= spi
->max_speed_hz
;
973 | SSCR0_DataSize(spi
->bits_per_word
> 16 ?
974 spi
->bits_per_word
- 16 : spi
->bits_per_word
)
976 | (spi
->bits_per_word
> 16 ? SSCR0_EDSS
: 0);
977 chip
->cr1
&= ~(SSCR1_SPO
| SSCR1_SPH
);
978 chip
->cr1
|= (((spi
->mode
& SPI_CPHA
) != 0) ? SSCR1_SPH
: 0)
979 | (((spi
->mode
& SPI_CPOL
) != 0) ? SSCR1_SPO
: 0);
981 if (spi
->mode
& SPI_LOOP
)
982 chip
->cr1
|= SSCR1_LBM
;
984 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
985 if (!pxa25x_ssp_comp(drv_data
))
986 dev_dbg(&spi
->dev
, "%ld Hz actual, %s\n",
987 drv_data
->max_clk_rate
988 / (1 + ((chip
->cr0
& SSCR0_SCR(0xfff)) >> 8)),
989 chip
->enable_dma
? "DMA" : "PIO");
991 dev_dbg(&spi
->dev
, "%ld Hz actual, %s\n",
992 drv_data
->max_clk_rate
/ 2
993 / (1 + ((chip
->cr0
& SSCR0_SCR(0x0ff)) >> 8)),
994 chip
->enable_dma
? "DMA" : "PIO");
996 if (spi
->bits_per_word
<= 8) {
998 chip
->read
= u8_reader
;
999 chip
->write
= u8_writer
;
1000 } else if (spi
->bits_per_word
<= 16) {
1002 chip
->read
= u16_reader
;
1003 chip
->write
= u16_writer
;
1004 } else if (spi
->bits_per_word
<= 32) {
1005 chip
->cr0
|= SSCR0_EDSS
;
1007 chip
->read
= u32_reader
;
1008 chip
->write
= u32_writer
;
1010 chip
->bits_per_word
= spi
->bits_per_word
;
1012 spi_set_ctldata(spi
, chip
);
1014 if (drv_data
->ssp_type
== CE4100_SSP
)
1017 return setup_cs(spi
, chip
, chip_info
);
1020 static void cleanup(struct spi_device
*spi
)
1022 struct chip_data
*chip
= spi_get_ctldata(spi
);
1023 struct driver_data
*drv_data
= spi_master_get_devdata(spi
->master
);
1028 if (drv_data
->ssp_type
!= CE4100_SSP
&& gpio_is_valid(chip
->gpio_cs
))
1029 gpio_free(chip
->gpio_cs
);
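
/*
 * pxa2xx_spi_acpi_get_pdata - build platform data for an LPSS SSP that was
 * enumerated from ACPI instead of board files: map the MMIO resource, look
 * up the clock and IRQ, and derive the port id from the ACPI _UID.
 */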
#ifdef CONFIG_ACPI
static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	struct pxa2xx_spi_master *pdata;
	struct acpi_device *adev;
	struct ssp_device *ssp;
	struct resource *res;
	int devid;

	if (!ACPI_HANDLE(&pdev->dev) ||
	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev,
			"failed to allocate memory for platform data\n");
		return NULL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	ssp = &pdata->ssp;

	ssp->phys_base = res->start;
	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
		return NULL;

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	ssp->irq = platform_get_irq(pdev, 0);
	ssp->type = LPSS_SSP;
	ssp->pdev = pdev;

	ssp->port_id = -1;
	if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
		ssp->port_id = devid;

	pdata->num_chipselect = 1;
	pdata->enable_dma = true;
	pdata->tx_chan_id = -1;
	pdata->rx_chan_id = -1;

	return pdata;
}

static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
	{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
#else
static inline struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	return NULL;
}
#endif
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	int status;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
		if (!platform_info) {
			dev_err(&pdev->dev, "missing platform data\n");
			return -ENODEV;
		}
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
		return -ENODEV;
	}

	/* Allocate master with space for drv_data and null dma buffer */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	master->dev.parent = &pdev->dev;
	master->dev.of_node = pdev->dev.of_node;
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;

	master->bus_num = ssp->port_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
	master->auto_runtime_pm = true;

	drv_data->ssp_type = ssp->type;
	drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (pxa25x_ssp_comp(drv_data)) {
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_dbg(dev, "no DMA channels available, using PIO\n");
			platform_info->enable_dma = false;
		}
	}

	/* Enable SOC clock */
	clk_prepare_enable(ssp->clk);

	drv_data->max_clk_rate = clk_get_rate(ssp->clk);

	/* Load default SSP configuration */
	write_SSCR0(0, drv_data->ioaddr);
	write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
				SSCR1_TxTresh(TX_THRESH_DFLT),
				drv_data->ioaddr);
	write_SSCR0(SSCR0_SCR(2)
			| SSCR0_Motorola
			| SSCR0_DataSize(8),
			drv_data->ioaddr);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, drv_data->ioaddr);
	write_SSPSP(0, drv_data->ioaddr);

	lpss_ssp_setup(drv_data);

	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_clock_enabled;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return status;

out_error_clock_enabled:
	clk_disable_unprepare(ssp->clk);
	pxa2xx_spi_dma_release(drv_data);
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}
static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	pm_runtime_get_sync(&pdev->dev);

	/* Disable the SSP at the peripheral and SOC level */
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	return 0;
}
static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	write_SSCR0(0, drv_data->ioaddr);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(ssp->clk);

	return 0;
}
static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	pxa2xx_spi_dma_resume(drv_data);

	/* Enable the SSP clock */
	if (!pm_runtime_suspended(dev))
		clk_prepare_enable(ssp->clk);

	/* Start the queue running */
	status = spi_master_resume(drv_data->master);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}
#ifdef CONFIG_PM_RUNTIME
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_prepare_enable(drv_data->ssp->clk);
	return 0;
}
#endif
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
};
static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.owner	= THIS_MODULE,
		.pm	= &pxa2xx_spi_pm_ops,
		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};
static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);
static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);