/*
 * Freescale eSPI controller driver.
 *
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>

/* eSPI Controller registers */
#define ESPI_SPMODE	0x00	/* eSPI mode register */
#define ESPI_SPIE	0x04	/* eSPI event register */
#define ESPI_SPIM	0x08	/* eSPI mask register */
#define ESPI_SPCOM	0x0c	/* eSPI command register */
#define ESPI_SPITF	0x10	/* eSPI transmit FIFO access register */
#define ESPI_SPIRF	0x14	/* eSPI receive FIFO access register */
#define ESPI_SPMODE0	0x20	/* eSPI cs0 mode register */

#define ESPI_SPMODEx(x)	(ESPI_SPMODE0 + (x) * 4)

/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		BIT(31)
#define SPMODE_LOOP		BIT(30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	BIT(31)
#define CSMODE_CP_BEGIN_EDGECLK	BIT(30)
#define CSMODE_REV		BIT(29)
#define CSMODE_DIV16		BIT(28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		BIT(20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

#define FSL_ESPI_FIFO_SIZE	32
#define FSL_ESPI_RXTHR		15

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define SPIE_RXCNT(reg)		((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg)		((reg >> 16) & 0x3F)
#define SPIE_TXE		BIT(15)	/* TX FIFO empty */
#define SPIE_DON		BIT(14)	/* TX done */
#define SPIE_RXT		BIT(13)	/* RX FIFO threshold */
#define SPIE_RXF		BIT(12)	/* RX FIFO full */
#define SPIE_TXT		BIT(11)	/* TX FIFO threshold */
#define SPIE_RNE		BIT(9)	/* RX FIFO not empty */
#define SPIE_TNF		BIT(8)	/* TX FIFO not full */

/* SPIM register values */
#define SPIM_TXE		BIT(15)	/* TX FIFO empty */
#define SPIM_DON		BIT(14)	/* TX done */
#define SPIM_RXT		BIT(13)	/* RX FIFO threshold */
#define SPIM_RXF		BIT(12)	/* RX FIFO full */
#define SPIM_TXT		BIT(11)	/* TX FIFO threshold */
#define SPIM_RNE		BIT(9)	/* RX FIFO not empty */
#define SPIM_TNF		BIT(8)	/* TX FIFO not full */

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_DO		BIT(28)	/* Dual output */
#define SPCOM_TO		BIT(27)	/* TX only */
#define SPCOM_RXSKIP(x)		((x) << 16)
#define SPCOM_TRANLEN(x)	((x) << 0)

#define SPCOM_TRANLEN_MAX	0x10000	/* Max transaction length */

#define AUTOSUSPEND_TIMEOUT	2000

struct fsl_espi {
	struct device *dev;
	void __iomem *reg_base;

	struct list_head *m_transfers;
	struct spi_transfer *tx_t;
	unsigned int tx_pos;
	bool tx_done;
	struct spi_transfer *rx_t;
	unsigned int rx_pos;
	bool rx_done;

	bool swab;
	unsigned int rxskip;

	spinlock_t lock;

	u32 spibrg;		/* SPIBRG input clock */

	struct completion done;
};

struct fsl_espi_cs {
	u32 hw_mode;
};

static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
	return ioread32be(espi->reg_base + offset);
}

static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
	return ioread16be(espi->reg_base + offset);
}

static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
	return ioread8(espi->reg_base + offset);
}

static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
				      u32 val)
{
	iowrite32be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
					u16 val)
{
	iowrite16be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
				       u8 val)
{
	iowrite8(val, espi->reg_base + offset);
}

static int fsl_espi_check_message(struct spi_message *m)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_transfer *t, *first;

	if (m->frame_length > SPCOM_TRANLEN_MAX) {
		dev_err(espi->dev, "message too long, size is %u bytes\n",
			m->frame_length);
		return -EMSGSIZE;
	}

	first = list_first_entry(&m->transfers, struct spi_transfer,
				 transfer_list);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (first->bits_per_word != t->bits_per_word ||
		    first->speed_hz != t->speed_hz) {
			dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
			return -EINVAL;
		}
	}

	/* ESPI supports MSB-first transfers for word size 8 / 16 only */
	if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
	    first->bits_per_word != 16) {
		dev_err(espi->dev,
			"MSB-first transfer not supported for wordsize %u\n",
			first->bits_per_word);
		return -EINVAL;
	}

	return 0;
}

static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
	struct spi_transfer *t;
	unsigned int i = 0, rxskip = 0;

	/*
	 * prerequisites for ESPI rxskip mode:
	 * - message has two transfers
	 * - first transfer is a write and second is a read
	 *
	 * In addition the current low-level transfer mechanism requires
	 * that the rxskip bytes fit into the TX FIFO. Else the transfer
	 * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
	 * the TX FIFO isn't re-filled.
	 */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (i == 0) {
			if (!t->tx_buf || t->rx_buf ||
			    t->len > FSL_ESPI_FIFO_SIZE)
				return 0;
			rxskip = t->len;
		} else if (i == 1) {
			if (t->tx_buf || !t->rx_buf)
				return 0;
		}
		i++;
	}

	return i == 2 ? rxskip : 0;
}
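
/*
 * Example of a message satisfying these prerequisites (illustrative values
 * only): a "command + response" sequence such as a SPI flash read, built by
 * a client roughly as
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd_buf,  .len = 5 },
 *		{ .rx_buf = data_buf, .len = 64 },
 *	};
 *
 * The first (write) transfer must fit into the 32-byte TX FIFO; its length
 * is then used as the RXSKIP value programmed into SPCOM.
 */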

static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 tx_fifo_avail;
	unsigned int tx_left;
	const void *tx_buf;

	/* if events is zero transfer has not started and tx fifo is empty */
	tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
start:
	tx_left = espi->tx_t->len - espi->tx_pos;
	tx_buf = espi->tx_t->tx_buf;
	while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
		if (tx_left >= 4) {
			if (!tx_buf)
				fsl_espi_write_reg(espi, ESPI_SPITF, 0);
			else if (espi->swab)
				fsl_espi_write_reg(espi, ESPI_SPITF,
					swahb32p(tx_buf + espi->tx_pos));
			else
				fsl_espi_write_reg(espi, ESPI_SPITF,
					*(u32 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 4;
			tx_left -= 4;
			tx_fifo_avail -= 4;
		} else if (tx_left >= 2 && tx_buf && espi->swab) {
			fsl_espi_write_reg16(espi, ESPI_SPITF,
					swab16p(tx_buf + espi->tx_pos));
			espi->tx_pos += 2;
			tx_left -= 2;
			tx_fifo_avail -= 2;
		} else {
			if (!tx_buf)
				fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
			else
				fsl_espi_write_reg8(espi, ESPI_SPITF,
					*(u8 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 1;
			tx_left -= 1;
			tx_fifo_avail -= 1;
		}
	}

	if (!tx_left) {
		/* Last transfer finished, in rxskip mode only one is needed */
		if (list_is_last(&espi->tx_t->transfer_list,
		    espi->m_transfers) || espi->rxskip) {
			espi->tx_done = true;
			return;
		}
		espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
		espi->tx_pos = 0;
		/* continue with next transfer if tx fifo is not full */
		if (tx_fifo_avail)
			goto start;
	}
}

static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 rx_fifo_avail = SPIE_RXCNT(events);
	unsigned int rx_left;
	void *rx_buf;

start:
	rx_left = espi->rx_t->len - espi->rx_pos;
	rx_buf = espi->rx_t->rx_buf;
	while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
		if (rx_left >= 4) {
			u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);

			if (rx_buf && espi->swab)
				*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
			else if (rx_buf)
				*(u32 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 4;
			rx_left -= 4;
			rx_fifo_avail -= 4;
		} else if (rx_left >= 2 && rx_buf && espi->swab) {
			u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);

			*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
			espi->rx_pos += 2;
			rx_left -= 2;
			rx_fifo_avail -= 2;
		} else {
			u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);

			if (rx_buf)
				*(u8 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 1;
			rx_left -= 1;
			rx_fifo_avail -= 1;
		}
	}

	if (!rx_left) {
		if (list_is_last(&espi->rx_t->transfer_list,
		    espi->m_transfers)) {
			espi->rx_done = true;
			return;
		}
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
		espi->rx_pos = 0;
		/* continue with next transfer if rx fifo is not empty */
		if (rx_fifo_avail)
			goto start;
	}
}
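
/*
 * Worked example for the divider math in fsl_espi_setup_transfer() below
 * (the clock values are assumed for illustration only): with
 * spibrg = 400 MHz and a requested speed_hz = 10 MHz,
 *
 *	pm = DIV_ROUND_UP(400000000, 10000000 * 4) - 1 = 9
 *
 * which fits the 4-bit PM field, so DIV16 stays clear.  For a 500 kHz
 * request the first result (199) exceeds 15, so DIV16 is set and pm is
 * recomputed with the extra /16 prescaler:
 *
 *	pm = DIV_ROUND_UP(400000000, 500000 * 16 * 4) - 1 = 12
 *
 * The min_speed_hz limit set in fsl_espi_probe() (spibrg / (4 * 16 * 16))
 * keeps the recomputed pm within the 4-bit PM field.
 */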

static void fsl_espi_setup_transfer(struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
	u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
	u32 hw_mode_old = cs->hw_mode;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);

	pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;

	if (pm > 15) {
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
	}

	cs->hw_mode |= CSMODE_PM(pm);

	/* don't write the mode register if the mode doesn't change */
	if (cs->hw_mode != hw_mode_old)
		fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
				   cs->hw_mode);
}

static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	unsigned int rx_len = t->len;
	u32 mask, spcom;
	int ret;

	reinit_completion(&espi->done);

	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
	spcom = SPCOM_CS(spi->chip_select);
	spcom |= SPCOM_TRANLEN(t->len - 1);

	/* configure RXSKIP mode */
	if (espi->rxskip) {
		spcom |= SPCOM_RXSKIP(espi->rxskip);
		rx_len = t->len - espi->rxskip;
		if (t->rx_nbits == SPI_NBITS_DUAL)
			spcom |= SPCOM_DO;
	}

	fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);

	/* enable interrupts */
	mask = SPIM_DON;
	if (rx_len > FSL_ESPI_FIFO_SIZE)
		mask |= SPIM_RXT;
	fsl_espi_write_reg(espi, ESPI_SPIM, mask);

	/* Prevent filling the fifo from getting interrupted */
	spin_lock_irq(&espi->lock);
	fsl_espi_fill_tx_fifo(espi, 0);
	spin_unlock_irq(&espi->lock);

	/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
	ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
	if (ret == 0)
		dev_err(espi->dev, "Transfer timed out!\n");

	/* disable rx ints */
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);

	return ret == 0 ? -ETIMEDOUT : 0;
}

static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_device *spi = m->spi;
	int ret;

	/* In case of LSB-first and bits_per_word > 8 byte-swap all words */
	espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;

	espi->m_transfers = &m->transfers;
	espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->tx_pos = 0;
	espi->tx_done = false;
	espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->rx_pos = 0;
	espi->rx_done = false;

	espi->rxskip = fsl_espi_check_rxskip_mode(m);
	if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
		dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
		return -EINVAL;
	}

	/* In RXSKIP mode skip first transfer for reads */
	if (espi->rxskip)
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);

	fsl_espi_setup_transfer(spi, trans);

	ret = fsl_espi_bufs(spi, trans);

	if (trans->delay_usecs)
		udelay(trans->delay_usecs);

	return ret;
}
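
/*
 * The controller runs a whole spi_message as one hardware transaction:
 * fsl_espi_do_one_msg() below collapses all transfers of a message into a
 * single synthetic spi_transfer whose len equals m->frame_length (bounded
 * by SPCOM_TRANLEN_MAX), which is also why fsl_espi_check_message()
 * requires identical bits_per_word and speed_hz across all transfers.
 */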

static int fsl_espi_do_one_msg(struct spi_master *master,
			       struct spi_message *m)
{
	unsigned int delay_usecs = 0, rx_nbits = 0;
	struct spi_transfer *t, trans = {};
	int ret;

	ret = fsl_espi_check_message(m);
	if (ret)
		goto out;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->delay_usecs > delay_usecs)
			delay_usecs = t->delay_usecs;
		if (t->rx_nbits > rx_nbits)
			rx_nbits = t->rx_nbits;
	}

	t = list_first_entry(&m->transfers, struct spi_transfer,
			     transfer_list);

	trans.len = m->frame_length;
	trans.speed_hz = t->speed_hz;
	trans.bits_per_word = t->bits_per_word;
	trans.delay_usecs = delay_usecs;
	trans.rx_nbits = rx_nbits;

	if (trans.len)
		ret = fsl_espi_trans(m, &trans);

	m->actual_length = ret ? 0 : trans.len;
out:
	if (m->status == -EINPROGRESS)
		m->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

static int fsl_espi_setup(struct spi_device *spi)
{
	struct fsl_espi *espi;
	u32 loop_mode;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi_set_ctldata(spi, cs);
	}

	espi = spi_master_get_devdata(spi->master);

	pm_runtime_get_sync(espi->dev);

	cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);

	fsl_espi_setup_transfer(spi, NULL);

	pm_runtime_mark_last_busy(espi->dev);
	pm_runtime_put_autosuspend(espi->dev);

	return 0;
}

static void fsl_espi_cleanup(struct spi_device *spi)
{
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	kfree(cs);
	spi_set_ctldata(spi, NULL);
}

static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
	if (!espi->rx_done)
		fsl_espi_read_rx_fifo(espi, events);

	if (!espi->tx_done)
		fsl_espi_fill_tx_fifo(espi, events);

	if (!espi->tx_done || !espi->rx_done)
		return;

	/* we're done, but check for errors before returning */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);

	if (!(events & SPIE_DON))
		dev_err(espi->dev,
			"Transfer done but SPIE_DON isn't set!\n");

	if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
		dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
		dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
			SPIE_RXCNT(events), SPIE_TXCNT(events));
	}

	complete(&espi->done);
}

static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct fsl_espi *espi = context_data;
	u32 events;

	spin_lock(&espi->lock);

	/* Get interrupt events(tx/rx) */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);
	if (!events) {
		spin_unlock(&espi->lock);
		return IRQ_NONE;
	}

	dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(espi, events);

	/* Clear the events */
	fsl_espi_write_reg(espi, ESPI_SPIE, events);

	spin_unlock(&espi->lock);

	return IRQ_HANDLED;
}

static int fsl_espi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval &= ~SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}

static int fsl_espi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval |= SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}

static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
	return SPCOM_TRANLEN_MAX;
}

static void fsl_espi_init_regs(struct device *dev, bool initial)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	struct device_node *nc;
	u32 csmode, cs, prop;
	int ret;

	/* SPI controller initializations */
	fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
	fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
	fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);

	/* Init eSPI CS mode register */
	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* get chip select */
		ret = of_property_read_u32(nc, "reg", &cs);
		if (ret || cs >= master->num_chipselect)
			continue;

		csmode = CSMODE_INIT_VAL;

		/* check if CSBEF is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csbef", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_BEF(0xf));
			csmode |= CSMODE_BEF(prop);
		}

		/* check if CSAFT is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csaft", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_AFT(0xf));
			csmode |= CSMODE_AFT(prop);
		}

		fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);

		if (initial)
			dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
	}

	/* Enable SPI interface */
	fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
}

static int fsl_espi_probe(struct device *dev, struct resource *mem,
			  unsigned int irq, unsigned int num_cs)
{
	struct spi_master *master;
	struct fsl_espi *espi;
	int ret;

	master = spi_alloc_master(dev, sizeof(struct fsl_espi));
	if (!master)
		return -ENOMEM;

	dev_set_drvdata(dev, master);

	master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
			    SPI_LSB_FIRST | SPI_LOOP;
	master->dev.of_node = dev->of_node;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->setup = fsl_espi_setup;
	master->cleanup = fsl_espi_cleanup;
	master->transfer_one_message = fsl_espi_do_one_msg;
	master->auto_runtime_pm = true;
	master->max_message_size = fsl_espi_max_message_size;
	master->num_chipselect = num_cs;

	espi = spi_master_get_devdata(master);
	spin_lock_init(&espi->lock);

	espi->dev = dev;
	espi->spibrg = fsl_get_sys_freq();
	if (espi->spibrg == -1) {
		dev_err(dev, "Can't get sys frequency!\n");
		ret = -EINVAL;
		goto err_probe;
	}
	/* determined by clock divider fields DIV16/PM in register SPMODEx */
	master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
	master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);

	init_completion(&espi->done);

	espi->reg_base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(espi->reg_base)) {
		ret = PTR_ERR(espi->reg_base);
		goto err_probe;
	}

	/* Register for SPI Interrupt */
	ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
	if (ret)
		goto err_probe;

	fsl_espi_init_regs(dev, true);

	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto err_pm;

	dev_info(dev, "at 0x%p (irq = %u)\n", espi->reg_base, irq);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
err_probe:
	spi_master_put(master);
	return ret;
}

static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 num_cs;
	int ret;

	ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
	if (ret) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return 0;
	}

	return num_cs;
}

static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct resource mem;
	unsigned int irq, num_cs;
	int ret;

	if (of_property_read_bool(np, "mode")) {
		dev_err(dev, "mode property is not supported on ESPI!\n");
		return -EINVAL;
	}

	num_cs = of_fsl_espi_get_chipselects(dev);
	if (!num_cs)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	return fsl_espi_probe(dev, &mem, irq, num_cs);
}

static int of_fsl_espi_remove(struct platform_device *dev)
{
	pm_runtime_disable(&dev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret) {
		dev_warn(dev, "cannot suspend master\n");
		return ret;
	}

	return pm_runtime_force_suspend(dev);
}

static int of_fsl_espi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	fsl_espi_init_regs(dev, false);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops espi_pm = {
	SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
			   fsl_espi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};

static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.of_match_table = of_fsl_espi_match,
		.pm = &espi_pm,
	},
	.probe		= of_fsl_espi_probe,
	.remove		= of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");