/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 * Copyright (C) 2013, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>

#include <asm/delay.h>

#include "spi-pxa2xx.h"

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define TIMOUT_DFLT		1000

/*
 * For testing SSCR1 changes that require an SSP restart: basically
 * everything except the service and interrupt enables. The PXA270
 * developer manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to
 * be in this list, but the PXA255 developer manual lists all bits, without
 * really meaning the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
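
/*
 * Illustration of how the mask is used (see pump_transfers() below): only
 * the bits covered by SSCR1_CHANGE_MASK are compared between the current
 * and the newly computed SSCR1 value, and the SSP is stopped and restarted
 * only when one of them differs, roughly:
 *
 *	if ((read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
 *	    (cr1 & SSCR1_CHANGE_MASK))
 *		stop the SSP, rewrite CR0/CR1, then restart it;
 */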

#define LPSS_RX_THRESH_DFLT	64
#define LPSS_TX_LOTHRESH_DFLT	160
#define LPSS_TX_HITHRESH_DFLT	224

/* Offset from drv_data->lpss_base */
#define GENERAL_REG		0x08
#define GENERAL_REG_RXTO_HOLDOFF_DISABLE	BIT(24)
#define SSP_REG			0x0c
#define SPI_CS_CONTROL		0x18
#define SPI_CS_CONTROL_SW_MODE	BIT(0)
#define SPI_CS_CONTROL_CS_HIGH	BIT(1)

static bool is_lpss_ssp(const struct driver_data *drv_data)
{
	return drv_data->ssp_type == LPSS_SSP;
}

/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}

static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}

/**
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
	unsigned offset = 0x400;
	u32 value, orig;

	if (!is_lpss_ssp(drv_data))
		return;

	/*
	 * Perform auto-detection of the LPSS SSP private registers. They
	 * can be either at 1k or 2k offset from the base address.
	 */
	orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);

	value = orig | SPI_CS_CONTROL_SW_MODE;
	writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
	value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
	if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
		offset = 0x800;
		goto detection_done;
	}

	value &= ~SPI_CS_CONTROL_SW_MODE;
	writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
	value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
	if (value != orig)
		offset = 0x800;

detection_done:
	/* Now set the LPSS base */
	drv_data->lpss_base = drv_data->ioaddr + offset;

	/* Enable software chip select control */
	value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);

	/* Enable multiblock DMA transfers */
	if (drv_data->master_info->enable_dma) {
		__lpss_ssp_write_priv(drv_data, SSP_REG, 1);

		value = __lpss_ssp_read_priv(drv_data, GENERAL_REG);
		value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
		__lpss_ssp_write_priv(drv_data, GENERAL_REG, value);
	}
}

static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
	u32 value;

	if (!is_lpss_ssp(drv_data))
		return;

	value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
	if (enable)
		value &= ~SPI_CS_CONTROL_CS_HIGH;
	else
		value |= SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
}

static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP) {
		write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
		return;
	}

	lpss_ssp_cs_control(drv_data, true);
}
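
/*
 * Chip select selection order used by cs_assert()/cs_deassert() above:
 * CE4100 programs the frame select field in SSSR, otherwise a
 * board-provided cs_control() callback wins, then a valid chip select
 * GPIO, and finally, on LPSS SSPs, the private SPI_CS_CONTROL register.
 */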

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
		return;
	}

	lpss_ssp_cs_control(drv_data, false);
}

int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	void __iomem *reg = drv_data->ioaddr;

	do {
		while (read_SSSR(reg) & SSSR_RNE) {
			read_SSDR(reg);
		}
	} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}
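
/*
 * The null_writer()/null_reader() helpers below handle half-duplex
 * transfers: pump_transfers() installs them when a transfer has no tx_buf
 * (clock out dummy data) or no rx_buf (discard whatever was clocked in),
 * so the FIFO pumping loop can treat every transfer as full duplex.
 */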

static int null_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(0, reg);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		read_SSDR(reg);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u8 *)(drv_data->tx), reg);
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = read_SSDR(reg);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u16 *)(drv_data->tx), reg);
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u32 *)(drv_data->tx), reg);
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}
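
/*
 * Summary of the PIO access width selection done in setup() and
 * pump_transfers(): bits_per_word <= 8 uses the u8 reader/writer pair with
 * n_bytes = 1, 9..16 bits use the u16 pair with n_bytes = 2, and 17..32
 * bits use the u32 pair with n_bytes = 4 (with SSCR0_EDSS set for more
 * than 16 bits).
 */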

void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}
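
/*
 * Message state machine, for reference: pxa2xx_spi_transfer_one_message()
 * starts a message in START_STATE, pump_transfers() runs each transfer and
 * pxa2xx_spi_next_transfer() advances to RUNNING_STATE or DONE_STATE; error
 * paths set ERROR_STATE, and giveback() finalizes the message in either
 * DONE_STATE or ERROR_STATE.
 */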

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	struct spi_message *msg;

	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	spi_finalize_current_message(drv_data->master);
	drv_data->cur_chip = NULL;
}

static void reset_sccr1(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

	sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
	sccr1_reg &= ~SSCR1_RFT;
	sccr1_reg |= chip->threshold;
	write_SSCR1(sccr1_reg, reg);
}

static void int_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	pxa2xx_spi_flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);

	/* Update total bytes transferred; count only the bytes actually read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
			drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = read_SSSR(reg) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		write_SSSR(SSSR_TINT, reg);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = read_SSCR1(reg);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {

			sccr1_reg &= ~SSCR1_RFT;

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
			case 2:
				bytes_left >>= 1;
			}

			if (bytes_left > RX_THRESH_DFLT)
				bytes_left = RX_THRESH_DFLT;

			sccr1_reg |= SSCR1_RxTresh(bytes_left);
		}
		write_SSCR1(sccr1_reg, reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}
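
/*
 * Note on interrupt_transfer() above: once the whole TX buffer has been
 * written, SSCR1_TIE is cleared so the handler only fires again for the
 * remaining receive data (or, on PXA25x which has no receiver timeout, for
 * the reduced RX threshold programmed above).
 */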

static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	void __iomem *reg = drv_data->ioaddr;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals, so we must first
	 * check whether we are RPM suspended or not. If we are, we assume
	 * that the IRQ was not for us (we shouldn't be RPM suspended when
	 * the interrupt is enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	/*
	 * If the device is not yet in RPM suspended state and we get an
	 * interrupt that is meant for another device, check if status bits
	 * are all set to one. That means that the device is already
	 * powered off.
	 */
	status = read_SSSR(reg);
	if (status == ~0)
		return IRQ_NONE;

	sccr1_reg = read_SSCR1(reg);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev,
			"bad message state in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->max_clk_rate;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
	else
		return ((ssp_clk / rate - 1) & 0xfff) << 8;
}
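
/*
 * Worked example for ssp_get_clk_div(), assuming an illustrative 100 MHz
 * max_clk_rate and a requested rate of 10 MHz: on PXA25x/CE4100 the serial
 * clock rate field becomes 100000000 / (2 * 10000000) - 1 = 4, otherwise
 * 100000000 / 10000000 - 1 = 9; either way the value is shifted into the
 * SSCR0_SCR bit position (<< 8) before being OR'ed into CR0.
 */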

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	void __iomem *reg = drv_data->ioaddr;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);

		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check if we can DMA this transfer */
	if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length of "
				"%u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		if (printk_ratelimit())
			dev_warn(&message->spi->dev, "pump_transfers: "
				"DMA disabled for transfer length %ld "
				"greater than %d\n",
				(long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bits per word on a per-transfer basis */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = ssp_get_clk_div(drv_data, speed);

		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->read = drv_data->read != null_reader ?
						u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->read = drv_data->read != null_reader ?
						u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->read = drv_data->read != null_reader ?
						u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u32_writer : null_writer;
		}
		/* If bits/word is changed in DMA mode, the thresholds and
		 * burst must also be re-checked */
		if (chip->enable_dma) {
			if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
							message->spi,
							bits, &dma_burst,
							&dma_thresh))
				if (printk_ratelimit())
					dev_warn(&message->spi->dev,
						"pump_transfers: DMA burst size "
						"reduced to match bits_per_word\n");
		}

		cr0 = clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}

	message->state = RUNNING_STATE;

	drv_data->dma_mapped = 0;
	if (pxa2xx_spi_dma_is_possible(drv_data->len))
		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		pxa2xx_spi_dma_prepare(drv_data, dma_burst);

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		write_SSSR(drv_data->clear_sr, reg);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	if (is_lpss_ssp(drv_data)) {
		if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
			write_SSIRF(chip->lpss_rx_threshold, reg);
		if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
			write_SSITF(chip->lpss_tx_threshold, reg);
	}

	/* see if we need to reload the config registers */
	if ((read_SSCR0(reg) != cr0)
		|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
			(cr1 & SSCR1_CHANGE_MASK)) {

		/* stop the SSP, and update the other bits */
		write_SSCR0(cr0 & ~SSCR0_SSE, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
		/* first set CR1 without interrupt and service enables */
		write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
		/* restart the SSP */
		write_SSCR0(cr0, reg);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	write_SSCR1(cr1, reg);
}

static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	drv_data->cur_msg = msg;
	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}

static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	/* Disable the SSP now */
	write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
		    drv_data->ioaddr);

	return 0;
}

static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	int err = 0;

	if (chip == NULL || chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev,
				"failed to request chip select GPIO%d\n",
				chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned int clk_div;
	uint tx_thres, tx_hi_thres, rx_thres;

	if (is_lpss_ssp(drv_data)) {
		tx_thres = LPSS_TX_LOTHRESH_DFLT;
		tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
		rx_thres = LPSS_RX_THRESH_DFLT;
	} else {
		tx_thres = TX_THRESH_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_DFLT;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"failed setup: can't allocate chip data\n");
			return -ENOMEM;
		}

		if (drv_data->ssp_type == CE4100_SSP) {
			if (spi->chip_select > 4) {
				dev_err(&spi->dev,
					"failed setup: cs number must not be > 4.\n");
				kfree(chip);
				return -EINVAL;
			}

			chip->frm = spi->chip_select;
		} else
			chip->gpio_cs = -1;
		chip->enable_dma = 0;
		chip->timeout = TIMOUT_DFLT;
	}

	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

	/* chip_info isn't always needed */
	chip->cr1 = 0;
	if (chip_info) {
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
		if (chip_info->tx_hi_threshold)
			tx_hi_thres = chip_info->tx_hi_threshold;
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->enable_dma = drv_data->master_info->enable_dma;
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
	} else if (ACPI_HANDLE(&spi->dev)) {
		/*
		 * Slave devices enumerated from ACPI namespace don't
		 * usually have chip_info but we still might want to use
		 * DMA with them.
		 */
		chip->enable_dma = drv_data->master_info->enable_dma;
	}

	chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);

	chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
	chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
				| SSITF_TxHiThresh(tx_hi_thres);

	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the
	 * burst and threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
						spi->bits_per_word,
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
			dev_warn(&spi->dev,
				 "in setup: DMA burst size reduced to match bits_per_word\n");
		}
	}

	clk_div = ssp_get_clk_div(drv_data, spi->max_speed_hz);
	chip->speed_hz = spi->max_speed_hz;

	chip->cr0 = clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(spi->bits_per_word > 16 ?
				spi->bits_per_word - 16 : spi->bits_per_word)
			| SSCR0_SSE
			| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

	if (spi->mode & SPI_LOOP)
		chip->cr1 |= SSCR1_LBM;

	/* NOTE: PXA25x_SSP _could_ use external clocking ... */
	if (!pxa25x_ssp_comp(drv_data))
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			drv_data->max_clk_rate
				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");
	else
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			drv_data->max_clk_rate / 2
				/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
		chip->cr0 |= SSCR0_EDSS;
		chip->n_bytes = 4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	}
	chip->bits_per_word = spi->bits_per_word;

	spi_set_ctldata(spi, chip);

	if (drv_data->ssp_type == CE4100_SSP)
		return 0;

	return setup_cs(spi, chip, chip_info);
}
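
/*
 * For reference, a minimal sketch of how a board file might describe a
 * slave handled by setup() above; the device name, bus number, speed and
 * chip select GPIO are made up for illustration only:
 *
 *	static struct pxa2xx_spi_chip example_chip_info = {
 *		.tx_threshold	= 8,
 *		.rx_threshold	= 8,
 *		.dma_burst_size	= 8,
 *		.timeout	= 235,
 *		.gpio_cs	= 2,
 *	};
 *
 *	static struct spi_board_info example_board_info[] __initdata = {
 *		{
 *			.modalias	 = "example-codec",
 *			.max_speed_hz	 = 3686400,
 *			.bus_num	 = 2,
 *			.chip_select	 = 0,
 *			.mode		 = SPI_MODE_0,
 *			.controller_data = &example_chip_info,
 *		},
 *	};
 */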

static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	if (!chip)
		return;

	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	kfree(chip);
}

#ifdef CONFIG_ACPI
static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	struct pxa2xx_spi_master *pdata;
	struct acpi_device *adev;
	struct ssp_device *ssp;
	struct resource *res;
	int devid;

	if (!ACPI_HANDLE(&pdev->dev) ||
	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev,
			"failed to allocate memory for platform data\n");
		return NULL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	ssp = &pdata->ssp;

	ssp->phys_base = res->start;
	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
		return NULL;

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	ssp->irq = platform_get_irq(pdev, 0);
	ssp->type = LPSS_SSP;
	ssp->pdev = pdev;

	ssp->port_id = -1;
	if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
		ssp->port_id = devid;

	pdata->num_chipselect = 1;
	pdata->enable_dma = true;
	pdata->tx_chan_id = -1;
	pdata->rx_chan_id = -1;

	return pdata;
}

static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
	{ "INT33C0", 0 },
	{ "INT33C1", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
#else
static inline struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	int status;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
		if (!platform_info) {
			dev_err(&pdev->dev, "missing platform data\n");
			return -ENODEV;
		}
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
		return -ENODEV;
	}

	/* Allocate master with space for drv_data and null dma buffer */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;
->dev
.parent
= &pdev
->dev
;
1137 master
->dev
.of_node
= pdev
->dev
.of_node
;
1138 /* the spi->mode bits understood by this driver: */
1139 master
->mode_bits
= SPI_CPOL
| SPI_CPHA
| SPI_CS_HIGH
| SPI_LOOP
;
1141 master
->bus_num
= ssp
->port_id
;
1142 master
->num_chipselect
= platform_info
->num_chipselect
;
1143 master
->dma_alignment
= DMA_ALIGNMENT
;
1144 master
->cleanup
= cleanup
;
1145 master
->setup
= setup
;
1146 master
->transfer_one_message
= pxa2xx_spi_transfer_one_message
;
1147 master
->unprepare_transfer_hardware
= pxa2xx_spi_unprepare_transfer
;
1148 master
->auto_runtime_pm
= true;
1150 drv_data
->ssp_type
= ssp
->type
;
1151 drv_data
->null_dma_buf
= (u32
*)PTR_ALIGN(&drv_data
[1], DMA_ALIGNMENT
);
1153 drv_data
->ioaddr
= ssp
->mmio_base
;
1154 drv_data
->ssdr_physical
= ssp
->phys_base
+ SSDR
;
1155 if (pxa25x_ssp_comp(drv_data
)) {
1156 master
->bits_per_word_mask
= SPI_BPW_RANGE_MASK(4, 16);
1157 drv_data
->int_cr1
= SSCR1_TIE
| SSCR1_RIE
;
1158 drv_data
->dma_cr1
= 0;
1159 drv_data
->clear_sr
= SSSR_ROR
;
1160 drv_data
->mask_sr
= SSSR_RFS
| SSSR_TFS
| SSSR_ROR
;
1162 master
->bits_per_word_mask
= SPI_BPW_RANGE_MASK(4, 32);
1163 drv_data
->int_cr1
= SSCR1_TIE
| SSCR1_RIE
| SSCR1_TINTE
;
1164 drv_data
->dma_cr1
= DEFAULT_DMA_CR1
;
1165 drv_data
->clear_sr
= SSSR_ROR
| SSSR_TINT
;
1166 drv_data
->mask_sr
= SSSR_TINT
| SSSR_RFS
| SSSR_TFS
| SSSR_ROR
;

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_dbg(dev, "no DMA channels available, using PIO\n");
			platform_info->enable_dma = false;
		}
	}

	/* Enable SOC clock */
	clk_prepare_enable(ssp->clk);

	drv_data->max_clk_rate = clk_get_rate(ssp->clk);

	/* Load default SSP configuration */
	write_SSCR0(0, drv_data->ioaddr);
	write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
				SSCR1_TxTresh(TX_THRESH_DFLT),
				drv_data->ioaddr);
	write_SSCR0(SSCR0_SCR(2)
			| SSCR0_Motorola
			| SSCR0_DataSize(8),
			drv_data->ioaddr);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, drv_data->ioaddr);
	write_SSPSP(0, drv_data->ioaddr);

	lpss_ssp_setup(drv_data);

	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_clock_enabled;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return status;

out_error_clock_enabled:
	clk_disable_unprepare(ssp->clk);
	pxa2xx_spi_dma_release(drv_data);
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	pm_runtime_get_sync(&pdev->dev);

	/* Disable the SSP at the peripheral and SOC level */
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);

	return 0;
}

static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	write_SSCR0(0, drv_data->ioaddr);
	clk_disable_unprepare(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	pxa2xx_spi_dma_resume(drv_data);

	/* Enable the SSP clock */
	clk_prepare_enable(ssp->clk);

	/* Start the queue running */
	status = spi_master_resume(drv_data->master);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_prepare_enable(drv_data->ssp->clk);
	return 0;
}
#endif

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
};

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.owner	= THIS_MODULE,
		.pm	= &pxa2xx_spi_pm_ops,
		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);