/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 * Copyright (C) 2013, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>

#include <asm/delay.h>

#include "spi-pxa2xx.h"
MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define TIMOUT_DFLT		1000
/*
 * For testing SSCR1 changes that require SSP restart, basically
 * everything except the service and interrupt enables. The PXA270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in this
 * list, but the PXA255 developer manual says all bits without really meaning
 * the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
#define LPSS_RX_THRESH_DFLT	64
#define LPSS_TX_LOTHRESH_DFLT	160
#define LPSS_TX_HITHRESH_DFLT	224
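/*
 * The LPSS defaults above are FIFO threshold levels: setup() folds them into
 * chip->lpss_rx_threshold and chip->lpss_tx_threshold via SSIRF_RxThresh()
 * and SSITF_TxLoThresh()/SSITF_TxHiThresh(), and pump_transfers() then
 * programs them into the SSIRF/SSITF registers.
 */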
/* Offset from drv_data->lpss_base */
#define GENERAL_REG		0x08
#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)

#define SPI_CS_CONTROL		0x18
#define SPI_CS_CONTROL_SW_MODE	BIT(0)
#define SPI_CS_CONTROL_CS_HIGH	BIT(1)
static bool is_lpss_ssp(const struct driver_data *drv_data)
{
        return drv_data->ssp_type == LPSS_SSP;
}
/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
        WARN_ON(!drv_data->lpss_base);
        return readl(drv_data->lpss_base + offset);
}
static void __lpss_ssp_write_priv(struct driver_data *drv_data,
                                  unsigned offset, u32 value)
{
        WARN_ON(!drv_data->lpss_base);
        writel(value, drv_data->lpss_base + offset);
}
/**
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
        unsigned offset = 0x400;
        u32 value, orig;

        if (!is_lpss_ssp(drv_data))
                return;

        /*
         * Perform auto-detection of the LPSS SSP private registers. They
         * can be either at 1k or 2k offset from the base address.
         */
        orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);

        /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
        value = orig | SPI_CS_CONTROL_SW_MODE;
        writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
        value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
        if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
                offset = 0x800;
                goto detection_done;
        }

        orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);

        /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
        value = orig & ~SPI_CS_CONTROL_SW_MODE;
        writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
        value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
        if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
                offset = 0x800;
                goto detection_done;
        }

detection_done:
        /* Now set the LPSS base */
        drv_data->lpss_base = drv_data->ioaddr + offset;

        /* Enable software chip select control */
        value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
        __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);

        /* Enable multiblock DMA transfers */
        if (drv_data->master_info->enable_dma) {
                __lpss_ssp_write_priv(drv_data, SSP_REG, 1);

                value = __lpss_ssp_read_priv(drv_data, GENERAL_REG);
                value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
                __lpss_ssp_write_priv(drv_data, GENERAL_REG, value);
        }
}
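/*
 * lpss_ssp_setup() is called once from probe() and again from
 * pxa2xx_spi_resume(), which reruns it to restore the LPSS private
 * register bits after system sleep.
 */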
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
        u32 value;

        if (!is_lpss_ssp(drv_data))
                return;

        value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
        if (enable)
                value &= ~SPI_CS_CONTROL_CS_HIGH;
        else
                value |= SPI_CS_CONTROL_CS_HIGH;
        __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
}
static void cs_assert(struct driver_data *drv_data)
{
        struct chip_data *chip = drv_data->cur_chip;

        if (drv_data->ssp_type == CE4100_SSP) {
                write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
                return;
        }

        if (chip->cs_control) {
                chip->cs_control(PXA2XX_CS_ASSERT);
                return;
        }

        if (gpio_is_valid(chip->gpio_cs)) {
                gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
                return;
        }

        lpss_ssp_cs_control(drv_data, true);
}
static void cs_deassert(struct driver_data *drv_data)
{
        struct chip_data *chip = drv_data->cur_chip;

        if (drv_data->ssp_type == CE4100_SSP)
                return;

        if (chip->cs_control) {
                chip->cs_control(PXA2XX_CS_DEASSERT);
                return;
        }

        if (gpio_is_valid(chip->gpio_cs)) {
                gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
                return;
        }

        lpss_ssp_cs_control(drv_data, false);
}
int pxa2xx_spi_flush(struct driver_data *drv_data)
{
        unsigned long limit = loops_per_jiffy << 1;

        void __iomem *reg = drv_data->ioaddr;

        do {
                while (read_SSSR(reg) & SSSR_RNE) {
                        read_SSDR(reg);
                }
        } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
        write_SSSR_CS(drv_data, SSSR_ROR);

        return limit;
}
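/*
 * Note: pxa2xx_spi_flush() returns the remaining busy-wait budget, so a
 * return value of 0 means the port never went idle; pump_transfers() below
 * treats that as a failed flush.
 */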
static int null_writer(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;
        u8 n_bytes = drv_data->n_bytes;

        if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
                || (drv_data->tx == drv_data->tx_end))
                return 0;

        write_SSDR(0, reg);
        drv_data->tx += n_bytes;

        return 1;
}
static int null_reader(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;
        u8 n_bytes = drv_data->n_bytes;

        while ((read_SSSR(reg) & SSSR_RNE)
                && (drv_data->rx < drv_data->rx_end)) {
                read_SSDR(reg);
                drv_data->rx += n_bytes;
        }

        return drv_data->rx == drv_data->rx_end;
}
static int u8_writer(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;

        if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
                || (drv_data->tx == drv_data->tx_end))
                return 0;

        write_SSDR(*(u8 *)(drv_data->tx), reg);
        ++drv_data->tx;

        return 1;
}
static int u8_reader(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;

        while ((read_SSSR(reg) & SSSR_RNE)
                && (drv_data->rx < drv_data->rx_end)) {
                *(u8 *)(drv_data->rx) = read_SSDR(reg);
                ++drv_data->rx;
        }

        return drv_data->rx == drv_data->rx_end;
}
static int u16_writer(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;

        if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
                || (drv_data->tx == drv_data->tx_end))
                return 0;

        write_SSDR(*(u16 *)(drv_data->tx), reg);
        drv_data->tx += 2;

        return 1;
}
static int u16_reader(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;

        while ((read_SSSR(reg) & SSSR_RNE)
                && (drv_data->rx < drv_data->rx_end)) {
                *(u16 *)(drv_data->rx) = read_SSDR(reg);
                drv_data->rx += 2;
        }

        return drv_data->rx == drv_data->rx_end;
}
static int u32_writer(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;

        if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
                || (drv_data->tx == drv_data->tx_end))
                return 0;

        write_SSDR(*(u32 *)(drv_data->tx), reg);
        drv_data->tx += 4;

        return 1;
}
static int u32_reader(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;

        while ((read_SSSR(reg) & SSSR_RNE)
                && (drv_data->rx < drv_data->rx_end)) {
                *(u32 *)(drv_data->rx) = read_SSDR(reg);
                drv_data->rx += 4;
        }

        return drv_data->rx == drv_data->rx_end;
}
void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
        struct spi_message *msg = drv_data->cur_msg;
        struct spi_transfer *trans = drv_data->cur_transfer;

        /* Move to next transfer */
        if (trans->transfer_list.next != &msg->transfers) {
                drv_data->cur_transfer =
                        list_entry(trans->transfer_list.next,
                                        struct spi_transfer,
                                        transfer_list);
                return RUNNING_STATE;
        } else
                return DONE_STATE;
}
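/*
 * Message state machine: pxa2xx_spi_transfer_one_message() starts a message
 * in START_STATE, pxa2xx_spi_next_transfer() above advances it to
 * RUNNING_STATE (or DONE_STATE after the last transfer), and the interrupt
 * path switches it to ERROR_STATE on failure. pump_transfers() consumes
 * these states to decide whether to program the next transfer or call
 * giveback().
 */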
/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
        struct spi_transfer *last_transfer;
        struct spi_message *msg;

        msg = drv_data->cur_msg;
        drv_data->cur_msg = NULL;
        drv_data->cur_transfer = NULL;

        last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
                                        transfer_list);

        /* Delay if requested before any change in chip select */
        if (last_transfer->delay_usecs)
                udelay(last_transfer->delay_usecs);

        /* Drop chip select UNLESS cs_change is true or we are returning
         * a message with an error, or next message is for another chip
         */
        if (!last_transfer->cs_change)
                cs_deassert(drv_data);
        else {
                struct spi_message *next_msg;

                /* Holding of cs was hinted, but we need to make sure
                 * the next message is for the same chip.  Don't waste
                 * time with the following tests unless this was hinted.
                 *
                 * We cannot postpone this until pump_messages, because
                 * after calling msg->complete (below) the driver that
                 * sent the current message could be unloaded, which
                 * could invalidate the cs_control() callback...
                 */

                /* get a pointer to the next message, if any */
                next_msg = spi_get_next_queued_message(drv_data->master);

                /* see if the next and current messages point
                 * to the same chip
                 */
                if (next_msg && next_msg->spi != msg->spi)
                        next_msg = NULL;
                if (!next_msg || msg->state == ERROR_STATE)
                        cs_deassert(drv_data);
        }

        drv_data->cur_chip = NULL;
        spi_finalize_current_message(drv_data->master);
}
static void reset_sccr1(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;
        struct chip_data *chip = drv_data->cur_chip;
        u32 sccr1_reg;

        sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
        sccr1_reg &= ~SSCR1_RFT;
        sccr1_reg |= chip->threshold;
        write_SSCR1(sccr1_reg, reg);
}
static void int_error_stop(struct driver_data *drv_data, const char* msg)
{
        void __iomem *reg = drv_data->ioaddr;

        /* Stop and reset SSP */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        reset_sccr1(drv_data);
        if (!pxa25x_ssp_comp(drv_data))
                write_SSTO(0, reg);
        pxa2xx_spi_flush(drv_data);
        write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

        dev_err(&drv_data->pdev->dev, "%s\n", msg);

        drv_data->cur_msg->state = ERROR_STATE;
        tasklet_schedule(&drv_data->pump_transfers);
}
static void int_transfer_complete(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;

        /* Stop SSP */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        reset_sccr1(drv_data);
        if (!pxa25x_ssp_comp(drv_data))
                write_SSTO(0, reg);

        /* Update total bytes transferred; return count reflects actual bytes read */
        drv_data->cur_msg->actual_length += drv_data->len -
                                (drv_data->rx_end - drv_data->rx);

        /* Transfer delays and chip select release are
         * handled in pump_transfers or giveback
         */

        /* Move to next transfer */
        drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

        /* Schedule transfer tasklet */
        tasklet_schedule(&drv_data->pump_transfers);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
        void __iomem *reg = drv_data->ioaddr;

        u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
                        drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

        u32 irq_status = read_SSSR(reg) & irq_mask;

        if (irq_status & SSSR_ROR) {
                int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
                return IRQ_HANDLED;
        }

        if (irq_status & SSSR_TINT) {
                write_SSSR(SSSR_TINT, reg);
                if (drv_data->read(drv_data)) {
                        int_transfer_complete(drv_data);
                        return IRQ_HANDLED;
                }
        }

        /* Drain rx fifo, Fill tx fifo and prevent overruns */
        do {
                if (drv_data->read(drv_data)) {
                        int_transfer_complete(drv_data);
                        return IRQ_HANDLED;
                }
        } while (drv_data->write(drv_data));

        if (drv_data->read(drv_data)) {
                int_transfer_complete(drv_data);
                return IRQ_HANDLED;
        }

        if (drv_data->tx == drv_data->tx_end) {
                u32 bytes_left;
                u32 sccr1_reg;

                sccr1_reg = read_SSCR1(reg);
                sccr1_reg &= ~SSCR1_TIE;

                /*
                 * PXA25x_SSP has no timeout, set up rx threshold for the
                 * remaining RX bytes.
                 */
                if (pxa25x_ssp_comp(drv_data)) {

                        sccr1_reg &= ~SSCR1_RFT;

                        bytes_left = drv_data->rx_end - drv_data->rx;
                        switch (drv_data->n_bytes) {
                        case 4:
                                bytes_left >>= 1;
                        case 2:
                                bytes_left >>= 1;
                        }

                        if (bytes_left > RX_THRESH_DFLT)
                                bytes_left = RX_THRESH_DFLT;

                        sccr1_reg |= SSCR1_RxTresh(bytes_left);
                }
                write_SSCR1(sccr1_reg, reg);
        }

        /* We did something */
        return IRQ_HANDLED;
}
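/*
 * The PIO path above simply alternates drv_data->read()/drv_data->write()
 * to drain the RX FIFO and keep the TX FIFO fed; the transfer is complete
 * once the reader has advanced drv_data->rx to drv_data->rx_end.
 */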
static irqreturn_t ssp_int(int irq, void *dev_id)
{
        struct driver_data *drv_data = dev_id;
        void __iomem *reg = drv_data->ioaddr;
        u32 sccr1_reg;
        u32 mask = drv_data->mask_sr;
        u32 status;

        /*
         * The IRQ might be shared with other peripherals so we must first
         * check that are we RPM suspended or not. If we are we assume that
         * the IRQ was not for us (we shouldn't be RPM suspended when the
         * interrupt is enabled).
         */
        if (pm_runtime_suspended(&drv_data->pdev->dev))
                return IRQ_NONE;

        /*
         * If the device is not yet in RPM suspended state and we get an
         * interrupt that is meant for another device, check if status bits
         * are all set to one. That means that the device is already
         * powered off.
         */
        status = read_SSSR(reg);
        if (status == ~0)
                return IRQ_NONE;

        sccr1_reg = read_SSCR1(reg);

        /* Ignore possible writes if we don't need to write */
        if (!(sccr1_reg & SSCR1_TIE))
                mask &= ~SSSR_TFS;

        /* Ignore RX timeout interrupt if it is disabled */
        if (!(sccr1_reg & SSCR1_TINTE))
                mask &= ~SSSR_TINT;

        if (!(status & mask))
                return IRQ_NONE;

        if (!drv_data->cur_msg) {

                write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
                write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
                if (!pxa25x_ssp_comp(drv_data))
                        write_SSTO(0, reg);
                write_SSSR_CS(drv_data, drv_data->clear_sr);

                dev_err(&drv_data->pdev->dev,
                        "bad message state in interrupt handler\n");

                return IRQ_HANDLED;
        }

        return drv_data->transfer_handler(drv_data);
}
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
        unsigned long ssp_clk = drv_data->max_clk_rate;
        const struct ssp_device *ssp = drv_data->ssp;

        rate = min_t(int, ssp_clk, rate);

        if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
                return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
        else
                return ((ssp_clk / rate - 1) & 0xfff) << 8;
}
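/*
 * Worked example (illustrative numbers only): with max_clk_rate = 100 MHz
 * and a requested rate of 1 MHz, the non-PXA25x branch above yields
 * (100000000 / 1000000 - 1) = 99 in the SSCR0 SCR field, and setup()'s
 * dev_dbg() inverts this as ssp_clk / (1 + SCR) = 1 MHz. On PXA25x/CE4100
 * the divider field is only 8 bits wide and the clock is additionally
 * halved.
 */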
static void pump_transfers(unsigned long data)
{
        struct driver_data *drv_data = (struct driver_data *)data;
        struct spi_message *message = NULL;
        struct spi_transfer *transfer = NULL;
        struct spi_transfer *previous = NULL;
        struct chip_data *chip = NULL;
        void __iomem *reg = drv_data->ioaddr;
        u32 clk_div = 0;
        u8 bits = 0;
        u32 speed = 0;
        u32 cr0;
        u32 cr1;
        u32 dma_thresh = drv_data->cur_chip->dma_threshold;
        u32 dma_burst = drv_data->cur_chip->dma_burst_size;

        /* Get current state information */
        message = drv_data->cur_msg;
        transfer = drv_data->cur_transfer;
        chip = drv_data->cur_chip;

        /* Handle for abort */
        if (message->state == ERROR_STATE) {
                message->status = -EIO;
                giveback(drv_data);
                return;
        }

        /* Handle end of message */
        if (message->state == DONE_STATE) {
                message->status = 0;
                giveback(drv_data);
                return;
        }

        /* Delay if requested at end of transfer before CS change */
        if (message->state == RUNNING_STATE) {
                previous = list_entry(transfer->transfer_list.prev,
                                        struct spi_transfer,
                                        transfer_list);

                if (previous->delay_usecs)
                        udelay(previous->delay_usecs);

                /* Drop chip select only if cs_change is requested */
                if (previous->cs_change)
                        cs_deassert(drv_data);
        }

        /* Check if we can DMA this transfer */
        if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {

                /* reject already-mapped transfers; PIO won't always work */
                if (message->is_dma_mapped
                                || transfer->rx_dma || transfer->tx_dma) {
                        dev_err(&drv_data->pdev->dev,
                                "pump_transfers: mapped transfer length of "
                                "%u is greater than %d\n",
                                transfer->len, MAX_DMA_LEN);
                        message->status = -EINVAL;
                        giveback(drv_data);
                        return;
                }

                /* warn ... we force this to PIO mode */
                dev_warn_ratelimited(&message->spi->dev,
                        "pump_transfers: DMA disabled for transfer length %ld "
                        "greater than %d\n",
                        (long)drv_data->len, MAX_DMA_LEN);
        }

        /* Setup the transfer state based on the type of transfer */
        if (pxa2xx_spi_flush(drv_data) == 0) {
                dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
                message->status = -EIO;
                giveback(drv_data);
                return;
        }
        drv_data->n_bytes = chip->n_bytes;
        drv_data->tx = (void *)transfer->tx_buf;
        drv_data->tx_end = drv_data->tx + transfer->len;
        drv_data->rx = transfer->rx_buf;
        drv_data->rx_end = drv_data->rx + transfer->len;
        drv_data->rx_dma = transfer->rx_dma;
        drv_data->tx_dma = transfer->tx_dma;
        drv_data->len = transfer->len;
        drv_data->write = drv_data->tx ? chip->write : null_writer;
        drv_data->read = drv_data->rx ? chip->read : null_reader;

        /* Change speed and bit per word on a per transfer */
        cr0 = chip->cr0;
        if (transfer->speed_hz || transfer->bits_per_word) {

                bits = chip->bits_per_word;
                speed = chip->speed_hz;

                if (transfer->speed_hz)
                        speed = transfer->speed_hz;

                if (transfer->bits_per_word)
                        bits = transfer->bits_per_word;

                clk_div = ssp_get_clk_div(drv_data, speed);

                if (bits <= 8) {
                        drv_data->n_bytes = 1;
                        drv_data->read = drv_data->read != null_reader ?
                                                u8_reader : null_reader;
                        drv_data->write = drv_data->write != null_writer ?
                                                u8_writer : null_writer;
                } else if (bits <= 16) {
                        drv_data->n_bytes = 2;
                        drv_data->read = drv_data->read != null_reader ?
                                                u16_reader : null_reader;
                        drv_data->write = drv_data->write != null_writer ?
                                                u16_writer : null_writer;
                } else if (bits <= 32) {
                        drv_data->n_bytes = 4;
                        drv_data->read = drv_data->read != null_reader ?
                                                u32_reader : null_reader;
                        drv_data->write = drv_data->write != null_writer ?
                                                u32_writer : null_writer;
                }
                /* if bits/word is changed in dma mode, then must check the
                 * thresholds and burst also */
                if (chip->enable_dma) {
                        if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
                                                        message->spi,
                                                        bits, &dma_burst,
                                                        &dma_thresh))
                                dev_warn_ratelimited(&message->spi->dev,
                                        "pump_transfers: DMA burst size reduced to match bits_per_word\n");
                }

                cr0 = clk_div
                        | SSCR0_Motorola
                        | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
                        | SSCR0_SSE
                        | (bits > 16 ? SSCR0_EDSS : 0);
        }

        message->state = RUNNING_STATE;

        drv_data->dma_mapped = 0;
        if (pxa2xx_spi_dma_is_possible(drv_data->len))
                drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
        if (drv_data->dma_mapped) {

                /* Ensure we have the correct interrupt handler */
                drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

                pxa2xx_spi_dma_prepare(drv_data, dma_burst);

                /* Clear status and start DMA engine */
                cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
                write_SSSR(drv_data->clear_sr, reg);

                pxa2xx_spi_dma_start(drv_data);
        } else {
                /* Ensure we have the correct interrupt handler */
                drv_data->transfer_handler = interrupt_transfer;

                cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
                write_SSSR_CS(drv_data, drv_data->clear_sr);
        }

        if (is_lpss_ssp(drv_data)) {
                if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
                        write_SSIRF(chip->lpss_rx_threshold, reg);
                if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
                        write_SSITF(chip->lpss_tx_threshold, reg);
        }

        /* see if we need to reload the config registers */
        if ((read_SSCR0(reg) != cr0)
                || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
                        (cr1 & SSCR1_CHANGE_MASK)) {

                /* stop the SSP, and update the other bits */
                write_SSCR0(cr0 & ~SSCR0_SSE, reg);
                if (!pxa25x_ssp_comp(drv_data))
                        write_SSTO(chip->timeout, reg);
                /* first set CR1 without interrupt and service enables */
                write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
                /* restart the SSP */
                write_SSCR0(cr0, reg);

        } else {
                if (!pxa25x_ssp_comp(drv_data))
                        write_SSTO(chip->timeout, reg);
        }

        cs_assert(drv_data);

        /* after chip select, release the data by enabling service
         * requests and interrupts, without changing any mode bits */
        write_SSCR1(cr1, reg);
}
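/*
 * Transfer flow: pxa2xx_spi_transfer_one_message() below queues the message
 * and schedules the pump_transfers tasklet; pump_transfers() above programs
 * the SSP and installs either pxa2xx_spi_dma_transfer or interrupt_transfer
 * as drv_data->transfer_handler; ssp_int() dispatches to that handler, which
 * reschedules pump_transfers after each transfer until giveback() finalizes
 * the message.
 */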
static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
                                           struct spi_message *msg)
{
        struct driver_data *drv_data = spi_master_get_devdata(master);

        drv_data->cur_msg = msg;
        /* Initial message state*/
        drv_data->cur_msg->state = START_STATE;
        drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
                                                struct spi_transfer,
                                                transfer_list);

        /* prepare to setup the SSP, in pump_transfers, using the per
         * chip configuration */
        drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

        /* Mark as busy and launch transfers */
        tasklet_schedule(&drv_data->pump_transfers);
        return 0;
}
*master
)
827 struct driver_data
*drv_data
= spi_master_get_devdata(master
);
829 /* Disable the SSP now */
830 write_SSCR0(read_SSCR0(drv_data
->ioaddr
) & ~SSCR0_SSE
,
836 static int setup_cs(struct spi_device
*spi
, struct chip_data
*chip
,
837 struct pxa2xx_spi_chip
*chip_info
)
841 if (chip
== NULL
|| chip_info
== NULL
)
844 /* NOTE: setup() can be called multiple times, possibly with
845 * different chip_info, release previously requested GPIO
847 if (gpio_is_valid(chip
->gpio_cs
))
848 gpio_free(chip
->gpio_cs
);
850 /* If (*cs_control) is provided, ignore GPIO chip select */
851 if (chip_info
->cs_control
) {
852 chip
->cs_control
= chip_info
->cs_control
;
856 if (gpio_is_valid(chip_info
->gpio_cs
)) {
857 err
= gpio_request(chip_info
->gpio_cs
, "SPI_CS");
859 dev_err(&spi
->dev
, "failed to request chip select GPIO%d\n",
864 chip
->gpio_cs
= chip_info
->gpio_cs
;
865 chip
->gpio_cs_inverted
= spi
->mode
& SPI_CS_HIGH
;
867 err
= gpio_direction_output(chip
->gpio_cs
,
868 !chip
->gpio_cs_inverted
);
static int setup(struct spi_device *spi)
{
        struct pxa2xx_spi_chip *chip_info = NULL;
        struct chip_data *chip;
        struct driver_data *drv_data = spi_master_get_devdata(spi->master);
        unsigned int clk_div;
        uint tx_thres, tx_hi_thres, rx_thres;

        if (is_lpss_ssp(drv_data)) {
                tx_thres = LPSS_TX_LOTHRESH_DFLT;
                tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
                rx_thres = LPSS_RX_THRESH_DFLT;
        } else {
                tx_thres = TX_THRESH_DFLT;
                tx_hi_thres = 0;
                rx_thres = RX_THRESH_DFLT;
        }

        /* Only alloc on first setup */
        chip = spi_get_ctldata(spi);
        if (!chip) {
                chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;

                if (drv_data->ssp_type == CE4100_SSP) {
                        if (spi->chip_select > 4) {
                                dev_err(&spi->dev,
                                        "failed setup: cs number must not be > 4.\n");
                                kfree(chip);
                                return -EINVAL;
                        }

                        chip->frm = spi->chip_select;
                } else
                        chip->gpio_cs = -1;
                chip->enable_dma = 0;
                chip->timeout = TIMOUT_DFLT;
        }

        /* protocol drivers may change the chip settings, so...
         * if chip_info exists, use it */
        chip_info = spi->controller_data;

        /* chip_info isn't always needed */
        chip->cr1 = 0;
        if (chip_info) {
                if (chip_info->timeout)
                        chip->timeout = chip_info->timeout;
                if (chip_info->tx_threshold)
                        tx_thres = chip_info->tx_threshold;
                if (chip_info->tx_hi_threshold)
                        tx_hi_thres = chip_info->tx_hi_threshold;
                if (chip_info->rx_threshold)
                        rx_thres = chip_info->rx_threshold;
                chip->enable_dma = drv_data->master_info->enable_dma;
                chip->dma_threshold = 0;
                if (chip_info->enable_loopback)
                        chip->cr1 = SSCR1_LBM;
        } else if (ACPI_HANDLE(&spi->dev)) {
                /*
                 * Slave devices enumerated from ACPI namespace don't
                 * usually have chip_info but we still might want to use
                 * DMA with them.
                 */
                chip->enable_dma = drv_data->master_info->enable_dma;
        }

        chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
                        (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);

        chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
        chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
                                | SSITF_TxHiThresh(tx_hi_thres);

        /* set dma burst and threshold outside of chip_info path so that if
         * chip_info goes away after setting chip->enable_dma, the
         * burst and threshold can still respond to changes in bits_per_word */
        if (chip->enable_dma) {
                /* set up legal burst and threshold for dma */
                if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
                                                spi->bits_per_word,
                                                &chip->dma_burst_size,
                                                &chip->dma_threshold)) {
                        dev_warn(&spi->dev,
                                "in setup: DMA burst size reduced to match bits_per_word\n");
                }
        }

        clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
        chip->speed_hz = spi->max_speed_hz;

        chip->cr0 = clk_div
                        | SSCR0_Motorola
                        | SSCR0_DataSize(spi->bits_per_word > 16 ?
                                spi->bits_per_word - 16 : spi->bits_per_word)
                        | SSCR0_SSE
                        | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
        chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
        chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
                        | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

        if (spi->mode & SPI_LOOP)
                chip->cr1 |= SSCR1_LBM;

        /* NOTE:  PXA25x_SSP _could_ use external clocking ... */
        if (!pxa25x_ssp_comp(drv_data))
                dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
                        drv_data->max_clk_rate
                                / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
                        chip->enable_dma ? "DMA" : "PIO");
        else
                dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
                        drv_data->max_clk_rate / 2
                                / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
                        chip->enable_dma ? "DMA" : "PIO");

        if (spi->bits_per_word <= 8) {
                chip->n_bytes = 1;
                chip->read = u8_reader;
                chip->write = u8_writer;
        } else if (spi->bits_per_word <= 16) {
                chip->n_bytes = 2;
                chip->read = u16_reader;
                chip->write = u16_writer;
        } else if (spi->bits_per_word <= 32) {
                chip->cr0 |= SSCR0_EDSS;
                chip->n_bytes = 4;
                chip->read = u32_reader;
                chip->write = u32_writer;
        }
        chip->bits_per_word = spi->bits_per_word;

        spi_set_ctldata(spi, chip);

        if (drv_data->ssp_type == CE4100_SSP)
                return 0;

        return setup_cs(spi, chip, chip_info);
}
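/*
 * Illustrative only (hypothetical names and values): a board file can pass
 * the chip_info consumed by setup() above through spi_board_info, e.g.:
 *
 *	static struct pxa2xx_spi_chip foo_spi_chip_info = {
 *		.tx_threshold = 8,
 *		.rx_threshold = 8,
 *		.timeout      = TIMOUT_DFLT,
 *		.gpio_cs      = 79,
 *	};
 *
 *	static struct spi_board_info foo_spi_board_info __initdata = {
 *		.modalias        = "foo-codec",
 *		.max_speed_hz    = 1000000,
 *		.bus_num         = 1,
 *		.chip_select     = 0,
 *		.controller_data = &foo_spi_chip_info,
 *	};
 *
 * Fields left unset fall back to the defaults chosen in setup().
 */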
static void cleanup(struct spi_device *spi)
{
        struct chip_data *chip = spi_get_ctldata(spi);
        struct driver_data *drv_data = spi_master_get_devdata(spi->master);

        if (!chip)
                return;

        if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
                gpio_free(chip->gpio_cs);

        kfree(chip);
}
#ifdef CONFIG_ACPI
static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
        struct pxa2xx_spi_master *pdata;
        struct acpi_device *adev;
        struct ssp_device *ssp;
        struct resource *res;
        int devid;

        if (!ACPI_HANDLE(&pdev->dev) ||
            acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
                return NULL;

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                return NULL;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return NULL;

        ssp = &pdata->ssp;

        ssp->phys_base = res->start;
        ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(ssp->mmio_base))
                return NULL;

        ssp->clk = devm_clk_get(&pdev->dev, NULL);
        ssp->irq = platform_get_irq(pdev, 0);
        ssp->type = LPSS_SSP;

        if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
                ssp->port_id = devid;

        pdata->num_chipselect = 1;
        pdata->enable_dma = true;
        pdata->tx_chan_id = -1;
        pdata->rx_chan_id = -1;

        return pdata;
}

static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
        /* ACPI HIDs of the supported LPSS SSP host controllers go here */
        { },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);

#else
static inline struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
        return NULL;
}
#endif
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct pxa2xx_spi_master *platform_info;
        struct spi_master *master;
        struct driver_data *drv_data;
        struct ssp_device *ssp;
        int status;

        platform_info = dev_get_platdata(dev);
        if (!platform_info) {
                platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
                if (!platform_info) {
                        dev_err(&pdev->dev, "missing platform data\n");
                        return -ENODEV;
                }
        }

        ssp = pxa_ssp_request(pdev->id, pdev->name);
        if (!ssp)
                ssp = &platform_info->ssp;

        if (!ssp->mmio_base) {
                dev_err(&pdev->dev, "failed to get ssp\n");
                return -ENODEV;
        }

        /* Allocate master with space for drv_data and null dma buffer */
        master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
        if (!master) {
                dev_err(&pdev->dev, "cannot alloc spi_master\n");
                pxa_ssp_free(ssp);
                return -ENOMEM;
        }
        drv_data = spi_master_get_devdata(master);
        drv_data->master = master;
        drv_data->master_info = platform_info;
        drv_data->pdev = pdev;
        drv_data->ssp = ssp;

        master->dev.parent = &pdev->dev;
        master->dev.of_node = pdev->dev.of_node;
        /* the spi->mode bits understood by this driver: */
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;

        master->bus_num = ssp->port_id;
        master->num_chipselect = platform_info->num_chipselect;
        master->dma_alignment = DMA_ALIGNMENT;
        master->cleanup = cleanup;
        master->setup = setup;
        master->transfer_one_message = pxa2xx_spi_transfer_one_message;
        master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
        master->auto_runtime_pm = true;

        drv_data->ssp_type = ssp->type;
        drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);

        drv_data->ioaddr = ssp->mmio_base;
        drv_data->ssdr_physical = ssp->phys_base + SSDR;
        if (pxa25x_ssp_comp(drv_data)) {
                master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
                drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
                drv_data->dma_cr1 = 0;
                drv_data->clear_sr = SSSR_ROR;
                drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
        } else {
                master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
                drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
                drv_data->dma_cr1 = DEFAULT_DMA_CR1;
                drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
                drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
        }

        status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
                        drv_data);
        if (status < 0) {
                dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
                goto out_error_master_alloc;
        }

        /* Setup DMA if requested */
        drv_data->tx_channel = -1;
        drv_data->rx_channel = -1;
        if (platform_info->enable_dma) {
                status = pxa2xx_spi_dma_setup(drv_data);
                if (status) {
                        dev_dbg(dev, "no DMA channels available, using PIO\n");
                        platform_info->enable_dma = false;
                }
        }

        /* Enable SOC clock */
        clk_prepare_enable(ssp->clk);

        drv_data->max_clk_rate = clk_get_rate(ssp->clk);

        /* Load default SSP configuration */
        write_SSCR0(0, drv_data->ioaddr);
        write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
                                SSCR1_TxTresh(TX_THRESH_DFLT),
                                drv_data->ioaddr);
        write_SSCR0(SSCR0_SCR(2)
                        | SSCR0_Motorola
                        | SSCR0_DataSize(8),
                        drv_data->ioaddr);
        if (!pxa25x_ssp_comp(drv_data))
                write_SSTO(0, drv_data->ioaddr);
        write_SSPSP(0, drv_data->ioaddr);

        lpss_ssp_setup(drv_data);

        tasklet_init(&drv_data->pump_transfers, pump_transfers,
                     (unsigned long)drv_data);

        pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        /* Register with the SPI framework */
        platform_set_drvdata(pdev, drv_data);
        status = devm_spi_register_master(&pdev->dev, master);
        if (status != 0) {
                dev_err(&pdev->dev, "problem registering spi master\n");
                goto out_error_clock_enabled;
        }

        return status;

out_error_clock_enabled:
        clk_disable_unprepare(ssp->clk);
        pxa2xx_spi_dma_release(drv_data);
        free_irq(ssp->irq, drv_data);

out_error_master_alloc:
        spi_master_put(master);
        pxa_ssp_free(ssp);
        return status;
}
static int pxa2xx_spi_remove(struct platform_device *pdev)
{
        struct driver_data *drv_data = platform_get_drvdata(pdev);
        struct ssp_device *ssp;

        if (!drv_data)
                return 0;
        ssp = drv_data->ssp;

        pm_runtime_get_sync(&pdev->dev);

        /* Disable the SSP at the peripheral and SOC level */
        write_SSCR0(0, drv_data->ioaddr);
        clk_disable_unprepare(ssp->clk);

        if (drv_data->master_info->enable_dma)
                pxa2xx_spi_dma_release(drv_data);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        free_irq(ssp->irq, drv_data);

        pxa_ssp_free(ssp);

        return 0;
}
static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
        int status = 0;

        if ((status = pxa2xx_spi_remove(pdev)) != 0)
                dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}
#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
        struct driver_data *drv_data = dev_get_drvdata(dev);
        struct ssp_device *ssp = drv_data->ssp;
        int status = 0;

        status = spi_master_suspend(drv_data->master);
        if (status != 0)
                return status;
        write_SSCR0(0, drv_data->ioaddr);

        if (!pm_runtime_suspended(dev))
                clk_disable_unprepare(ssp->clk);

        return 0;
}
static int pxa2xx_spi_resume(struct device *dev)
{
        struct driver_data *drv_data = dev_get_drvdata(dev);
        struct ssp_device *ssp = drv_data->ssp;
        int status = 0;

        pxa2xx_spi_dma_resume(drv_data);

        /* Enable the SSP clock */
        if (!pm_runtime_suspended(dev))
                clk_prepare_enable(ssp->clk);

        /* Restore LPSS private register bits */
        lpss_ssp_setup(drv_data);

        /* Start the queue running */
        status = spi_master_resume(drv_data->master);
        if (status != 0) {
                dev_err(dev, "problem starting queue (%d)\n", status);
                return status;
        }

        return 0;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
        struct driver_data *drv_data = dev_get_drvdata(dev);

        clk_disable_unprepare(drv_data->ssp->clk);
        return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
        struct driver_data *drv_data = dev_get_drvdata(dev);

        clk_prepare_enable(drv_data->ssp->clk);
        return 0;
}
#endif
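/*
 * PM split: the system sleep hooks above stop the SPI queue and gate the
 * SSP clock only if runtime PM has not already done so, while the runtime
 * PM hooks just gate/ungate the clock; both are wired together in
 * pxa2xx_spi_pm_ops below.
 */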
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
        SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
                           pxa2xx_spi_runtime_resume, NULL)
};
static struct platform_driver driver = {
        .driver = {
                .name	= "pxa2xx-spi",
                .owner	= THIS_MODULE,
                .pm	= &pxa2xx_spi_pm_ops,
                .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
        },
        .probe = pxa2xx_spi_probe,
        .remove = pxa2xx_spi_remove,
        .shutdown = pxa2xx_spi_shutdown,
};
static int __init pxa2xx_spi_init(void)
{
        return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);
static void __exit pxa2xx_spi_exit(void)
{
        platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);