// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSPI controller driver.
 *
 * Copyright 2010 Freescale Semiconductor, Inc.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/pm_runtime.h>
#include <sysdev/fsl_soc.h>
/* eSPI Controller registers */
#define ESPI_SPMODE	0x00	/* eSPI mode register */
#define ESPI_SPIE	0x04	/* eSPI event register */
#define ESPI_SPIM	0x08	/* eSPI mask register */
#define ESPI_SPCOM	0x0c	/* eSPI command register */
#define ESPI_SPITF	0x10	/* eSPI transmit FIFO access register */
#define ESPI_SPIRF	0x14	/* eSPI receive FIFO access register */
#define ESPI_SPMODE0	0x20	/* eSPI cs0 mode register */

#define ESPI_SPMODEx(x)	(ESPI_SPMODE0 + (x) * 4)

/* eSPI Controller mode register definitions */
#define SPMODE_ENABLE		BIT(31)
#define SPMODE_LOOP		BIT(30)
#define SPMODE_TXTHR(x)		((x) << 8)
#define SPMODE_RXTHR(x)		((x) << 0)

/* eSPI Controller CS mode register definitions */
#define CSMODE_CI_INACTIVEHIGH	BIT(31)
#define CSMODE_CP_BEGIN_EDGECLK	BIT(30)
#define CSMODE_REV		BIT(29)
#define CSMODE_DIV16		BIT(28)
#define CSMODE_PM(x)		((x) << 24)
#define CSMODE_POL_1		BIT(20)
#define CSMODE_LEN(x)		((x) << 16)
#define CSMODE_BEF(x)		((x) << 12)
#define CSMODE_AFT(x)		((x) << 8)
#define CSMODE_CG(x)		((x) << 3)

#define FSL_ESPI_FIFO_SIZE	32
#define FSL_ESPI_RXTHR		15

/* Default mode/csmode for eSPI controller */
#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(FSL_ESPI_RXTHR))
#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
		| CSMODE_AFT(0) | CSMODE_CG(1))

/* SPIE register values */
#define SPIE_RXCNT(reg)	((reg >> 24) & 0x3F)
#define SPIE_TXCNT(reg)	((reg >> 16) & 0x3F)
#define SPIE_TXE	BIT(15)	/* TX FIFO empty */
#define SPIE_DON	BIT(14)	/* TX done */
#define SPIE_RXT	BIT(13)	/* RX FIFO threshold */
#define SPIE_RXF	BIT(12)	/* RX FIFO full */
#define SPIE_TXT	BIT(11)	/* TX FIFO threshold */
#define SPIE_RNE	BIT(9)	/* RX FIFO not empty */
#define SPIE_TNF	BIT(8)	/* TX FIFO not full */

/* SPIM register values */
#define SPIM_TXE	BIT(15)	/* TX FIFO empty */
#define SPIM_DON	BIT(14)	/* TX done */
#define SPIM_RXT	BIT(13)	/* RX FIFO threshold */
#define SPIM_RXF	BIT(12)	/* RX FIFO full */
#define SPIM_TXT	BIT(11)	/* TX FIFO threshold */
#define SPIM_RNE	BIT(9)	/* RX FIFO not empty */
#define SPIM_TNF	BIT(8)	/* TX FIFO not full */

/* SPCOM register values */
#define SPCOM_CS(x)		((x) << 30)
#define SPCOM_DO		BIT(28)		/* Dual output */
#define SPCOM_TO		BIT(27)		/* TX only */
#define SPCOM_RXSKIP(x)		((x) << 16)
#define SPCOM_TRANLEN(x)	((x) << 0)

#define SPCOM_TRANLEN_MAX	0x10000	/* Max transaction length */

#define AUTOSUSPEND_TIMEOUT	2000
struct fsl_espi {
	struct device *dev;
	void __iomem *reg_base;

	struct list_head *m_transfers;
	struct spi_transfer *tx_t;
	unsigned int tx_pos;
	bool tx_done;
	struct spi_transfer *rx_t;
	unsigned int rx_pos;
	bool rx_done;

	bool swab;
	unsigned int rxskip;

	spinlock_t lock;

	u32 spibrg;		/* SPIBRG input clock */

	struct completion done;
};

struct fsl_espi_cs {
	u32 hw_mode;
};
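/*
 * Register accessor helpers. The eSPI register block is accessed with the
 * big-endian MMIO variants (ioread32be()/iowrite32be() and friends); the
 * 16- and 8-bit helpers exist so partial words can be pushed into and
 * pulled out of the FIFO access registers.
 */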
static inline u32 fsl_espi_read_reg(struct fsl_espi *espi, int offset)
{
	return ioread32be(espi->reg_base + offset);
}

static inline u16 fsl_espi_read_reg16(struct fsl_espi *espi, int offset)
{
	return ioread16be(espi->reg_base + offset);
}

static inline u8 fsl_espi_read_reg8(struct fsl_espi *espi, int offset)
{
	return ioread8(espi->reg_base + offset);
}

static inline void fsl_espi_write_reg(struct fsl_espi *espi, int offset,
				      u32 val)
{
	iowrite32be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg16(struct fsl_espi *espi, int offset,
					u16 val)
{
	iowrite16be(val, espi->reg_base + offset);
}

static inline void fsl_espi_write_reg8(struct fsl_espi *espi, int offset,
				       u8 val)
{
	iowrite8(val, espi->reg_base + offset);
}
static int fsl_espi_check_message(struct spi_message *m)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_transfer *t, *first;

	if (m->frame_length > SPCOM_TRANLEN_MAX) {
		dev_err(espi->dev, "message too long, size is %u bytes\n",
			m->frame_length);
		return -EMSGSIZE;
	}

	first = list_first_entry(&m->transfers, struct spi_transfer,
				 transfer_list);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (first->bits_per_word != t->bits_per_word ||
		    first->speed_hz != t->speed_hz) {
			dev_err(espi->dev, "bits_per_word/speed_hz should be the same for all transfers\n");
			return -EINVAL;
		}
	}

	/* ESPI supports MSB-first transfers for word size 8 / 16 only */
	if (!(m->spi->mode & SPI_LSB_FIRST) && first->bits_per_word != 8 &&
	    first->bits_per_word != 16) {
		dev_err(espi->dev,
			"MSB-first transfer not supported for wordsize %u\n",
			first->bits_per_word);
		return -EINVAL;
	}

	return 0;
}
static unsigned int fsl_espi_check_rxskip_mode(struct spi_message *m)
{
	struct spi_transfer *t;
	unsigned int i = 0, rxskip = 0;

	/*
	 * prerequisites for ESPI rxskip mode:
	 * - message has two transfers
	 * - first transfer is a write and second is a read
	 *
	 * In addition the current low-level transfer mechanism requires
	 * that the rxskip bytes fit into the TX FIFO. Else the transfer
	 * would hang because after the first FSL_ESPI_FIFO_SIZE bytes
	 * the TX FIFO isn't re-filled.
	 */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (i == 0) {
			if (!t->tx_buf || t->rx_buf ||
			    t->len > FSL_ESPI_FIFO_SIZE)
				return 0;
			rxskip = t->len;
		} else if (i == 1) {
			if (t->tx_buf || !t->rx_buf)
				return 0;
		}
		i++;
	}

	return i == 2 ? rxskip : 0;
}
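/*
 * Fill the TX FIFO as far as possible: 4 bytes at a time while at least
 * four bytes are left, then a 2-byte write in the LSB-first byte-swap case,
 * and single bytes for the tail. A NULL tx_buf results in zeroes being
 * clocked out. Called with events == 0 from fsl_espi_bufs() when the FIFO
 * is known to be empty, and with the current SPIE value from the interrupt
 * handler afterwards.
 */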
static void fsl_espi_fill_tx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 tx_fifo_avail;
	unsigned int tx_left;
	const void *tx_buf;

	/* if events is zero transfer has not started and tx fifo is empty */
	tx_fifo_avail = events ? SPIE_TXCNT(events) : FSL_ESPI_FIFO_SIZE;
start:
	tx_left = espi->tx_t->len - espi->tx_pos;
	tx_buf = espi->tx_t->tx_buf;
	while (tx_fifo_avail >= min(4U, tx_left) && tx_left) {
		if (tx_left >= 4) {
			if (!tx_buf)
				fsl_espi_write_reg(espi, ESPI_SPITF, 0);
			else if (espi->swab)
				fsl_espi_write_reg(espi, ESPI_SPITF,
					swahb32p(tx_buf + espi->tx_pos));
			else
				fsl_espi_write_reg(espi, ESPI_SPITF,
					*(u32 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 4;
			tx_left -= 4;
			tx_fifo_avail -= 4;
		} else if (tx_left >= 2 && tx_buf && espi->swab) {
			fsl_espi_write_reg16(espi, ESPI_SPITF,
					swab16p(tx_buf + espi->tx_pos));
			espi->tx_pos += 2;
			tx_left -= 2;
			tx_fifo_avail -= 2;
		} else {
			if (!tx_buf)
				fsl_espi_write_reg8(espi, ESPI_SPITF, 0);
			else
				fsl_espi_write_reg8(espi, ESPI_SPITF,
					*(u8 *)(tx_buf + espi->tx_pos));
			espi->tx_pos += 1;
			tx_left -= 1;
			tx_fifo_avail -= 1;
		}
	}

	if (!tx_left) {
		/* Last transfer finished, in rxskip mode only one is needed */
		if (list_is_last(&espi->tx_t->transfer_list,
				 espi->m_transfers) || espi->rxskip) {
			espi->tx_done = true;
			return;
		}
		espi->tx_t = list_next_entry(espi->tx_t, transfer_list);
		espi->tx_pos = 0;
		/* continue with next transfer if tx fifo is not full */
		if (tx_fifo_avail)
			goto start;
	}
}
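/*
 * Mirror image of the TX path: drain the RX FIFO in 4/2/1-byte chunks,
 * byte-swapping on the way out when LSB-first mode is emulated, and
 * simply discarding data for transfers that have no rx_buf.
 */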
static void fsl_espi_read_rx_fifo(struct fsl_espi *espi, u32 events)
{
	u32 rx_fifo_avail = SPIE_RXCNT(events);
	unsigned int rx_left;
	void *rx_buf;

start:
	rx_left = espi->rx_t->len - espi->rx_pos;
	rx_buf = espi->rx_t->rx_buf;
	while (rx_fifo_avail >= min(4U, rx_left) && rx_left) {
		if (rx_left >= 4) {
			u32 val = fsl_espi_read_reg(espi, ESPI_SPIRF);

			if (rx_buf && espi->swab)
				*(u32 *)(rx_buf + espi->rx_pos) = swahb32(val);
			else if (rx_buf)
				*(u32 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 4;
			rx_left -= 4;
			rx_fifo_avail -= 4;
		} else if (rx_left >= 2 && rx_buf && espi->swab) {
			u16 val = fsl_espi_read_reg16(espi, ESPI_SPIRF);

			*(u16 *)(rx_buf + espi->rx_pos) = swab16(val);
			espi->rx_pos += 2;
			rx_left -= 2;
			rx_fifo_avail -= 2;
		} else {
			u8 val = fsl_espi_read_reg8(espi, ESPI_SPIRF);

			if (rx_buf)
				*(u8 *)(rx_buf + espi->rx_pos) = val;
			espi->rx_pos += 1;
			rx_left -= 1;
			rx_fifo_avail -= 1;
		}
	}

	if (!rx_left) {
		if (list_is_last(&espi->rx_t->transfer_list,
				 espi->m_transfers)) {
			espi->rx_done = true;
			return;
		}
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);
		espi->rx_pos = 0;
		/* continue with next transfer if rx fifo is not empty */
		if (rx_fifo_avail)
			goto start;
	}
}
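/*
 * The SPI clock follows from the divider fields programmed below as
 * spibrg / (4 * (PM + 1)), or spibrg / (16 * 4 * (PM + 1)) once DIV16 is
 * set. Illustrative example (numbers assumed, not from a real board):
 * with spibrg = 500 MHz and a requested 10 MHz,
 * PM = DIV_ROUND_UP(500000000, 10000000 * 4) - 1 = 12, which yields
 * 500 MHz / (4 * 13) ~= 9.6 MHz, i.e. the closest rate at or below the
 * requested one.
 */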
static void fsl_espi_setup_transfer(struct spi_device *spi,
				    struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	int bits_per_word = t ? t->bits_per_word : spi->bits_per_word;
	u32 pm, hz = t ? t->speed_hz : spi->max_speed_hz;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);
	u32 hw_mode_old = cs->hw_mode;

	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));

	cs->hw_mode |= CSMODE_LEN(bits_per_word - 1);

	pm = DIV_ROUND_UP(espi->spibrg, hz * 4) - 1;
	if (pm > 15) {
		cs->hw_mode |= CSMODE_DIV16;
		pm = DIV_ROUND_UP(espi->spibrg, hz * 16 * 4) - 1;
	}

	cs->hw_mode |= CSMODE_PM(pm);

	/* don't write the mode register if the mode doesn't change */
	if (cs->hw_mode != hw_mode_old)
		fsl_espi_write_reg(espi, ESPI_SPMODEx(spi->chip_select),
				   cs->hw_mode);
}
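/*
 * Run one hardware transaction: SPCOM[TRANLEN] covers the combined length
 * of the whole message, SPCOM[RXSKIP] tells the controller how many leading
 * bytes (typically a command/address phase) to discard on the receive side,
 * and the DON interrupt (plus the RX threshold interrupt for payloads larger
 * than the 32-byte FIFO) drives completion.
 */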
static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct fsl_espi *espi = spi_master_get_devdata(spi->master);
	unsigned int rx_len = t->len;
	u32 mask, spcom;
	int ret;

	reinit_completion(&espi->done);

	/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
	spcom = SPCOM_CS(spi->chip_select);
	spcom |= SPCOM_TRANLEN(t->len - 1);

	/* configure RXSKIP mode */
	if (espi->rxskip) {
		spcom |= SPCOM_RXSKIP(espi->rxskip);
		rx_len = t->len - espi->rxskip;
		if (t->rx_nbits == SPI_NBITS_DUAL)
			spcom |= SPCOM_DO;
	}

	fsl_espi_write_reg(espi, ESPI_SPCOM, spcom);

	/* enable interrupts */
	mask = SPIM_DON;
	if (rx_len > FSL_ESPI_FIFO_SIZE)
		mask |= SPIM_RXT;
	fsl_espi_write_reg(espi, ESPI_SPIM, mask);

	/* Prevent filling the fifo from getting interrupted */
	spin_lock_irq(&espi->lock);
	fsl_espi_fill_tx_fifo(espi, 0);
	spin_unlock_irq(&espi->lock);

	/* Won't hang up forever, SPI bus sometimes got lost interrupts... */
	ret = wait_for_completion_timeout(&espi->done, 2 * HZ);
	if (ret == 0)
		dev_err(espi->dev, "Transfer timed out!\n");

	/* disable rx ints */
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);

	return ret == 0 ? -ETIMEDOUT : 0;
}
static int fsl_espi_trans(struct spi_message *m, struct spi_transfer *trans)
{
	struct fsl_espi *espi = spi_master_get_devdata(m->spi->master);
	struct spi_device *spi = m->spi;
	int ret;

	/* In case of LSB-first and bits_per_word > 8 byte-swap all words */
	espi->swab = spi->mode & SPI_LSB_FIRST && trans->bits_per_word > 8;

	espi->m_transfers = &m->transfers;
	espi->tx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->tx_pos = 0;
	espi->tx_done = false;
	espi->rx_t = list_first_entry(&m->transfers, struct spi_transfer,
				      transfer_list);
	espi->rx_pos = 0;
	espi->rx_done = false;

	espi->rxskip = fsl_espi_check_rxskip_mode(m);
	if (trans->rx_nbits == SPI_NBITS_DUAL && !espi->rxskip) {
		dev_err(espi->dev, "Dual output mode requires RXSKIP mode!\n");
		return -EINVAL;
	}

	/* In RXSKIP mode skip first transfer for reads */
	if (espi->rxskip)
		espi->rx_t = list_next_entry(espi->rx_t, transfer_list);

	fsl_espi_setup_transfer(spi, trans);

	ret = fsl_espi_bufs(spi, trans);

	spi_transfer_delay_exec(trans);

	return ret;
}
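/*
 * All transfers of a message are collapsed into a single synthetic
 * spi_transfer covering m->frame_length bytes and run as one SPCOM
 * transaction; this is also why fsl_espi_check_message() insists on
 * identical bits_per_word and speed_hz across the message. The largest
 * per-transfer delay found in the message is applied once, after the
 * combined transaction.
 */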
static int fsl_espi_do_one_msg(struct spi_master *master,
			       struct spi_message *m)
{
	unsigned int delay_usecs = 0, rx_nbits = 0;
	unsigned int delay_nsecs = 0, delay_nsecs1 = 0;
	struct spi_transfer *t, trans = {};
	int ret;

	ret = fsl_espi_check_message(m);
	if (ret)
		goto out;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->delay_usecs) {
			if (t->delay_usecs > delay_usecs) {
				delay_usecs = t->delay_usecs;
				delay_nsecs = delay_usecs * 1000;
			}
		} else {
			delay_nsecs1 = spi_delay_to_ns(&t->delay, t);
			if (delay_nsecs1 > delay_nsecs)
				delay_nsecs = delay_nsecs1;
		}
		if (t->rx_nbits > rx_nbits)
			rx_nbits = t->rx_nbits;
	}

	t = list_first_entry(&m->transfers, struct spi_transfer,
			     transfer_list);

	trans.len = m->frame_length;
	trans.speed_hz = t->speed_hz;
	trans.bits_per_word = t->bits_per_word;
	trans.delay.value = delay_nsecs;
	trans.delay.unit = SPI_DELAY_UNIT_NSECS;
	trans.rx_nbits = rx_nbits;

	if (trans.len)
		ret = fsl_espi_trans(m, &trans);

	m->actual_length = ret ? 0 : trans.len;
out:
	if (m->status == -EINPROGRESS)
		m->status = ret;

	spi_finalize_current_message(master);

	return ret;
}
static int fsl_espi_setup(struct spi_device *spi)
{
	struct fsl_espi *espi;
	u32 loop_mode;
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	if (!cs) {
		cs = kzalloc(sizeof(*cs), GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		spi_set_ctldata(spi, cs);
	}

	espi = spi_master_get_devdata(spi->master);

	pm_runtime_get_sync(espi->dev);

	cs->hw_mode = fsl_espi_read_reg(espi, ESPI_SPMODEx(spi->chip_select));
	/* mask out bits we are going to set */
	cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
			 | CSMODE_REV);

	if (spi->mode & SPI_CPHA)
		cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
	if (spi->mode & SPI_CPOL)
		cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
	if (!(spi->mode & SPI_LSB_FIRST))
		cs->hw_mode |= CSMODE_REV;

	/* Handle the loop mode */
	loop_mode = fsl_espi_read_reg(espi, ESPI_SPMODE);
	loop_mode &= ~SPMODE_LOOP;
	if (spi->mode & SPI_LOOP)
		loop_mode |= SPMODE_LOOP;
	fsl_espi_write_reg(espi, ESPI_SPMODE, loop_mode);

	fsl_espi_setup_transfer(spi, NULL);

	pm_runtime_mark_last_busy(espi->dev);
	pm_runtime_put_autosuspend(espi->dev);

	return 0;
}

static void fsl_espi_cleanup(struct spi_device *spi)
{
	struct fsl_espi_cs *cs = spi_get_ctldata(spi);

	kfree(cs);
	spi_set_ctldata(spi, NULL);
}
static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events)
{
	if (!espi->rx_done)
		fsl_espi_read_rx_fifo(espi, events);

	if (!espi->tx_done)
		fsl_espi_fill_tx_fifo(espi, events);

	if (!espi->tx_done || !espi->rx_done)
		return;

	/* we're done, but check for errors before returning */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);

	if (!(events & SPIE_DON))
		dev_err(espi->dev,
			"Transfer done but SPIE_DON isn't set!\n");

	if (SPIE_RXCNT(events) || SPIE_TXCNT(events) != FSL_ESPI_FIFO_SIZE) {
		dev_err(espi->dev, "Transfer done but rx/tx fifo's aren't empty!\n");
		dev_err(espi->dev, "SPIE_RXCNT = %d, SPIE_TXCNT = %d\n",
			SPIE_RXCNT(events), SPIE_TXCNT(events));
	}

	complete(&espi->done);
}
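/*
 * Top-half interrupt handler. espi->lock serializes FIFO access between
 * this handler and the initial FIFO fill done under spin_lock_irq() in
 * fsl_espi_bufs(), so the transfer state is never advanced from two
 * contexts at once.
 */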
static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
{
	struct fsl_espi *espi = context_data;
	u32 events, mask;

	spin_lock(&espi->lock);

	/* Get interrupt events(tx/rx) */
	events = fsl_espi_read_reg(espi, ESPI_SPIE);
	mask = fsl_espi_read_reg(espi, ESPI_SPIM);
	if (!(events & mask)) {
		spin_unlock(&espi->lock);
		return IRQ_NONE;
	}

	dev_vdbg(espi->dev, "%s: events %x\n", __func__, events);

	fsl_espi_cpu_irq(espi, events);

	/* Clear the events */
	fsl_espi_write_reg(espi, ESPI_SPIE, events);

	spin_unlock(&espi->lock);

	return IRQ_HANDLED;
}
#ifdef CONFIG_PM
static int fsl_espi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval &= ~SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}

static int fsl_espi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	u32 regval;

	regval = fsl_espi_read_reg(espi, ESPI_SPMODE);
	regval |= SPMODE_ENABLE;
	fsl_espi_write_reg(espi, ESPI_SPMODE, regval);

	return 0;
}
#endif
static size_t fsl_espi_max_message_size(struct spi_device *spi)
{
	return SPCOM_TRANLEN_MAX;
}
static void fsl_espi_init_regs(struct device *dev, bool initial)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct fsl_espi *espi = spi_master_get_devdata(master);
	struct device_node *nc;
	u32 csmode, cs, prop;
	int ret;

	/* SPI controller initializations */
	fsl_espi_write_reg(espi, ESPI_SPMODE, 0);
	fsl_espi_write_reg(espi, ESPI_SPIM, 0);
	fsl_espi_write_reg(espi, ESPI_SPCOM, 0);
	fsl_espi_write_reg(espi, ESPI_SPIE, 0xffffffff);

	/* Init eSPI CS mode register */
	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* get chip select */
		ret = of_property_read_u32(nc, "reg", &cs);
		if (ret || cs >= master->num_chipselect)
			continue;

		csmode = CSMODE_INIT_VAL;

		/* check if CSBEF is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csbef", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_BEF(0xf));
			csmode |= CSMODE_BEF(prop);
		}

		/* check if CSAFT is set in device tree */
		ret = of_property_read_u32(nc, "fsl,csaft", &prop);
		if (!ret) {
			csmode &= ~(CSMODE_AFT(0xf));
			csmode |= CSMODE_AFT(prop);
		}

		fsl_espi_write_reg(espi, ESPI_SPMODEx(cs), csmode);

		if (initial)
			dev_info(dev, "cs=%u, init_csmode=0x%x\n", cs, csmode);
	}

	/* Enable SPI interface */
	fsl_espi_write_reg(espi, ESPI_SPMODE, SPMODE_INIT_VAL | SPMODE_ENABLE);
}
static int fsl_espi_probe(struct device *dev, struct resource *mem,
			  unsigned int irq, unsigned int num_cs)
{
	struct spi_master *master;
	struct fsl_espi *espi;
	int ret;

	master = spi_alloc_master(dev, sizeof(struct fsl_espi));
	if (!master)
		return -ENOMEM;

	dev_set_drvdata(dev, master);

	master->mode_bits = SPI_RX_DUAL | SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
			    SPI_LSB_FIRST | SPI_LOOP;
	master->dev.of_node = dev->of_node;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->setup = fsl_espi_setup;
	master->cleanup = fsl_espi_cleanup;
	master->transfer_one_message = fsl_espi_do_one_msg;
	master->auto_runtime_pm = true;
	master->max_message_size = fsl_espi_max_message_size;
	master->num_chipselect = num_cs;

	espi = spi_master_get_devdata(master);
	spin_lock_init(&espi->lock);

	espi->dev = dev;
	espi->spibrg = fsl_get_sys_freq();
	if (espi->spibrg == -1) {
		dev_err(dev, "Can't get sys frequency!\n");
		ret = -EINVAL;
		goto err_probe;
	}
	/* determined by clock divider fields DIV16/PM in register SPMODEx */
	master->min_speed_hz = DIV_ROUND_UP(espi->spibrg, 4 * 16 * 16);
	master->max_speed_hz = DIV_ROUND_UP(espi->spibrg, 4);

	init_completion(&espi->done);

	espi->reg_base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(espi->reg_base)) {
		ret = PTR_ERR(espi->reg_base);
		goto err_probe;
	}

	/* Register for SPI Interrupt */
	ret = devm_request_irq(dev, irq, fsl_espi_irq, 0, "fsl_espi", espi);
	if (ret)
		goto err_probe;

	fsl_espi_init_regs(dev, true);

	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret)
		goto err_pm;

	dev_info(dev, "irq = %u\n", irq);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;

err_pm:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
err_probe:
	spi_master_put(master);
	return ret;
}
static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 num_cs;
	int ret;

	ret = of_property_read_u32(np, "fsl,espi-num-chipselects", &num_cs);
	if (ret) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return 0;
	}

	return num_cs;
}
static int of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct resource mem;
	unsigned int irq, num_cs;
	int ret;

	if (of_property_read_bool(np, "mode")) {
		dev_err(dev, "mode property is not supported on ESPI!\n");
		return -EINVAL;
	}

	num_cs = of_fsl_espi_get_chipselects(dev);
	if (!num_cs)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		return ret;

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		return -EINVAL;

	return fsl_espi_probe(dev, &mem, irq, num_cs);
}
static int of_fsl_espi_remove(struct platform_device *dev)
{
	pm_runtime_disable(&dev->dev);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int of_fsl_espi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

static int of_fsl_espi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	fsl_espi_init_regs(dev, false);

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	return spi_master_resume(master);
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops espi_pm = {
	SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
			   fsl_espi_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
};
static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);
static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.of_match_table = of_fsl_espi_match,
		.pm = &espi_pm,
	},
	.probe		= of_fsl_espi_probe,
	.remove		= of_fsl_espi_remove,
};
module_platform_driver(fsl_espi_driver);
);
851 MODULE_AUTHOR("Mingkai Hu");
852 MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
853 MODULE_LICENSE("GPL");