/*
 * Driver for Broadcom BRCMSTB, NSP, NS2, Cygnus SPI Controllers
 *
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */
19 #include <linux/clk.h>
20 #include <linux/delay.h>
21 #include <linux/device.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
29 #include <linux/of_irq.h>
30 #include <linux/platform_device.h>
31 #include <linux/slab.h>
32 #include <linux/spi/spi.h>
33 #include <linux/spi/spi-mem.h>
34 #include <linux/sysfs.h>
35 #include <linux/types.h>
36 #include "spi-bcm-qspi.h"
38 #define DRIVER_NAME "bcm_qspi"
41 /* BSPI register offsets */
42 #define BSPI_REVISION_ID 0x000
43 #define BSPI_SCRATCH 0x004
44 #define BSPI_MAST_N_BOOT_CTRL 0x008
45 #define BSPI_BUSY_STATUS 0x00c
46 #define BSPI_INTR_STATUS 0x010
47 #define BSPI_B0_STATUS 0x014
48 #define BSPI_B0_CTRL 0x018
49 #define BSPI_B1_STATUS 0x01c
50 #define BSPI_B1_CTRL 0x020
51 #define BSPI_STRAP_OVERRIDE_CTRL 0x024
52 #define BSPI_FLEX_MODE_ENABLE 0x028
53 #define BSPI_BITS_PER_CYCLE 0x02c
54 #define BSPI_BITS_PER_PHASE 0x030
55 #define BSPI_CMD_AND_MODE_BYTE 0x034
56 #define BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
57 #define BSPI_BSPI_XOR_VALUE 0x03c
58 #define BSPI_BSPI_XOR_ENABLE 0x040
59 #define BSPI_BSPI_PIO_MODE_ENABLE 0x044
60 #define BSPI_BSPI_PIO_IODIR 0x048
61 #define BSPI_BSPI_PIO_DATA 0x04c
63 /* RAF register offsets */
64 #define BSPI_RAF_START_ADDR 0x100
65 #define BSPI_RAF_NUM_WORDS 0x104
66 #define BSPI_RAF_CTRL 0x108
67 #define BSPI_RAF_FULLNESS 0x10c
68 #define BSPI_RAF_WATERMARK 0x110
69 #define BSPI_RAF_STATUS 0x114
70 #define BSPI_RAF_READ_DATA 0x118
71 #define BSPI_RAF_WORD_CNT 0x11c
72 #define BSPI_RAF_CURR_ADDR 0x120
74 /* Override mode masks */
75 #define BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE BIT(0)
76 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL BIT(1)
77 #define BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE BIT(2)
78 #define BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD BIT(3)
79 #define BSPI_STRAP_OVERRIDE_CTRL_ENDAIN_MODE BIT(4)
81 #define BSPI_ADDRLEN_3BYTES 3
82 #define BSPI_ADDRLEN_4BYTES 4
84 #define BSPI_RAF_STATUS_FIFO_EMPTY_MASK BIT(1)
86 #define BSPI_RAF_CTRL_START_MASK BIT(0)
87 #define BSPI_RAF_CTRL_CLEAR_MASK BIT(1)
89 #define BSPI_BPP_MODE_SELECT_MASK BIT(8)
90 #define BSPI_BPP_ADDR_SELECT_MASK BIT(16)
92 #define BSPI_READ_LENGTH 256
94 /* MSPI register offsets */
95 #define MSPI_SPCR0_LSB 0x000
96 #define MSPI_SPCR0_MSB 0x004
97 #define MSPI_SPCR1_LSB 0x008
98 #define MSPI_SPCR1_MSB 0x00c
99 #define MSPI_NEWQP 0x010
100 #define MSPI_ENDQP 0x014
101 #define MSPI_SPCR2 0x018
102 #define MSPI_MSPI_STATUS 0x020
103 #define MSPI_CPTQP 0x024
104 #define MSPI_SPCR3 0x028
105 #define MSPI_TXRAM 0x040
106 #define MSPI_RXRAM 0x0c0
107 #define MSPI_CDRAM 0x140
108 #define MSPI_WRITE_LOCK 0x180
110 #define MSPI_MASTER_BIT BIT(7)
112 #define MSPI_NUM_CDRAM 16
113 #define MSPI_CDRAM_CONT_BIT BIT(7)
114 #define MSPI_CDRAM_BITSE_BIT BIT(6)
115 #define MSPI_CDRAM_PCS 0xf
117 #define MSPI_SPCR2_SPE BIT(6)
118 #define MSPI_SPCR2_CONT_AFTER_CMD BIT(7)
120 #define MSPI_MSPI_STATUS_SPIF BIT(0)
122 #define INTR_BASE_BIT_SHIFT 0x02
123 #define INTR_COUNT 0x07
125 #define NUM_CHIPSELECT 4
126 #define QSPI_SPBR_MIN 8U
127 #define QSPI_SPBR_MAX 255U
129 #define OPCODE_DIOR 0xBB
130 #define OPCODE_QIOR 0xEB
131 #define OPCODE_DIOR_4B 0xBC
132 #define OPCODE_QIOR_4B 0xEC
134 #define MAX_CMD_SIZE 6
136 #define ADDR_4MB_MASK GENMASK(22, 0)
138 /* stop at end of transfer, no other reason */
139 #define TRANS_STATUS_BREAK_NONE 0
140 /* stop at end of spi_message */
141 #define TRANS_STATUS_BREAK_EOM 1
142 /* stop at end of spi_transfer if delay */
143 #define TRANS_STATUS_BREAK_DELAY 2
144 /* stop at end of spi_transfer if cs_change */
145 #define TRANS_STATUS_BREAK_CS_CHANGE 4
146 /* stop if we run out of bytes */
147 #define TRANS_STATUS_BREAK_NO_BYTES 8
149 /* events that make us stop filling TX slots */
150 #define TRANS_STATUS_BREAK_TX (TRANS_STATUS_BREAK_EOM | \
151 TRANS_STATUS_BREAK_DELAY | \
152 TRANS_STATUS_BREAK_CS_CHANGE)
154 /* events that make us deassert CS */
155 #define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
156 TRANS_STATUS_BREAK_CS_CHANGE)
158 struct bcm_qspi_parms
{
164 struct bcm_xfer_mode
{
167 unsigned int addrlen
;
183 struct bcm_qspi_irq
{
184 const char *irq_name
;
185 const irq_handler_t irq_handler
;
190 struct bcm_qspi_dev_id
{
191 const struct bcm_qspi_irq
*irqp
;
197 struct spi_transfer
*trans
;
199 bool mspi_last_trans
;
203 struct platform_device
*pdev
;
204 struct spi_master
*master
;
208 void __iomem
*base
[BASEMAX
];
210 /* Some SoCs provide custom interrupt status register(s) */
211 struct bcm_qspi_soc_intc
*soc_intc
;
213 struct bcm_qspi_parms last_parms
;
214 struct qspi_trans trans_pos
;
219 const struct spi_mem_op
*bspi_rf_op
;
222 u32 bspi_rf_op_status
;
223 struct bcm_xfer_mode xfer_mode
;
224 u32 s3_strap_override_ctrl
;
228 struct bcm_qspi_dev_id
*dev_ids
;
229 struct completion mspi_done
;
230 struct completion bspi_done
;
233 static inline bool has_bspi(struct bcm_qspi
*qspi
)
235 return qspi
->bspi_mode
;
238 /* Read qspi controller register*/
239 static inline u32
bcm_qspi_read(struct bcm_qspi
*qspi
, enum base_type type
,
242 return bcm_qspi_readl(qspi
->big_endian
, qspi
->base
[type
] + offset
);
245 /* Write qspi controller register*/
246 static inline void bcm_qspi_write(struct bcm_qspi
*qspi
, enum base_type type
,
247 unsigned int offset
, unsigned int data
)
249 bcm_qspi_writel(qspi
->big_endian
, data
, qspi
->base
[type
] + offset
);
253 static int bcm_qspi_bspi_busy_poll(struct bcm_qspi
*qspi
)
257 /* this should normally finish within 10us */
258 for (i
= 0; i
< 1000; i
++) {
259 if (!(bcm_qspi_read(qspi
, BSPI
, BSPI_BUSY_STATUS
) & 1))
263 dev_warn(&qspi
->pdev
->dev
, "timeout waiting for !busy_status\n");
267 static inline bool bcm_qspi_bspi_ver_three(struct bcm_qspi
*qspi
)
269 if (qspi
->bspi_maj_rev
< 4)
274 static void bcm_qspi_bspi_flush_prefetch_buffers(struct bcm_qspi
*qspi
)
276 bcm_qspi_bspi_busy_poll(qspi
);
277 /* Force rising edge for the b0/b1 'flush' field */
278 bcm_qspi_write(qspi
, BSPI
, BSPI_B0_CTRL
, 1);
279 bcm_qspi_write(qspi
, BSPI
, BSPI_B1_CTRL
, 1);
280 bcm_qspi_write(qspi
, BSPI
, BSPI_B0_CTRL
, 0);
281 bcm_qspi_write(qspi
, BSPI
, BSPI_B1_CTRL
, 0);
284 static int bcm_qspi_bspi_lr_is_fifo_empty(struct bcm_qspi
*qspi
)
286 return (bcm_qspi_read(qspi
, BSPI
, BSPI_RAF_STATUS
) &
287 BSPI_RAF_STATUS_FIFO_EMPTY_MASK
);
290 static inline u32
bcm_qspi_bspi_lr_read_fifo(struct bcm_qspi
*qspi
)
292 u32 data
= bcm_qspi_read(qspi
, BSPI
, BSPI_RAF_READ_DATA
);
294 /* BSPI v3 LR is LE only, convert data to host endianness */
295 if (bcm_qspi_bspi_ver_three(qspi
))
296 data
= le32_to_cpu(data
);
301 static inline void bcm_qspi_bspi_lr_start(struct bcm_qspi
*qspi
)
303 bcm_qspi_bspi_busy_poll(qspi
);
304 bcm_qspi_write(qspi
, BSPI
, BSPI_RAF_CTRL
,
305 BSPI_RAF_CTRL_START_MASK
);
308 static inline void bcm_qspi_bspi_lr_clear(struct bcm_qspi
*qspi
)
310 bcm_qspi_write(qspi
, BSPI
, BSPI_RAF_CTRL
,
311 BSPI_RAF_CTRL_CLEAR_MASK
);
312 bcm_qspi_bspi_flush_prefetch_buffers(qspi
);
315 static void bcm_qspi_bspi_lr_data_read(struct bcm_qspi
*qspi
)
317 u32
*buf
= (u32
*)qspi
->bspi_rf_op
->data
.buf
.in
;
320 dev_dbg(&qspi
->pdev
->dev
, "xfer %p rx %p rxlen %d\n", qspi
->bspi_rf_op
,
321 qspi
->bspi_rf_op
->data
.buf
.in
, qspi
->bspi_rf_op_len
);
322 while (!bcm_qspi_bspi_lr_is_fifo_empty(qspi
)) {
323 data
= bcm_qspi_bspi_lr_read_fifo(qspi
);
324 if (likely(qspi
->bspi_rf_op_len
>= 4) &&
325 IS_ALIGNED((uintptr_t)buf
, 4)) {
326 buf
[qspi
->bspi_rf_op_idx
++] = data
;
327 qspi
->bspi_rf_op_len
-= 4;
329 /* Read out remaining bytes, make sure*/
330 u8
*cbuf
= (u8
*)&buf
[qspi
->bspi_rf_op_idx
];
332 data
= cpu_to_le32(data
);
333 while (qspi
->bspi_rf_op_len
) {
336 qspi
->bspi_rf_op_len
--;
342 static void bcm_qspi_bspi_set_xfer_params(struct bcm_qspi
*qspi
, u8 cmd_byte
,
343 int bpp
, int bpc
, int flex_mode
)
345 bcm_qspi_write(qspi
, BSPI
, BSPI_FLEX_MODE_ENABLE
, 0);
346 bcm_qspi_write(qspi
, BSPI
, BSPI_BITS_PER_CYCLE
, bpc
);
347 bcm_qspi_write(qspi
, BSPI
, BSPI_BITS_PER_PHASE
, bpp
);
348 bcm_qspi_write(qspi
, BSPI
, BSPI_CMD_AND_MODE_BYTE
, cmd_byte
);
349 bcm_qspi_write(qspi
, BSPI
, BSPI_FLEX_MODE_ENABLE
, flex_mode
);
352 static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi
*qspi
,
353 const struct spi_mem_op
*op
, int hp
)
355 int bpc
= 0, bpp
= 0;
356 u8 command
= op
->cmd
.opcode
;
357 int width
= op
->data
.buswidth
? op
->data
.buswidth
: SPI_NBITS_SINGLE
;
358 int addrlen
= op
->addr
.nbytes
;
361 dev_dbg(&qspi
->pdev
->dev
, "set flex mode w %x addrlen %x hp %d\n",
364 if (addrlen
== BSPI_ADDRLEN_4BYTES
)
365 bpp
= BSPI_BPP_ADDR_SELECT_MASK
;
367 bpp
|= (op
->dummy
.nbytes
* 8) / op
->dummy
.buswidth
;
370 case SPI_NBITS_SINGLE
:
371 if (addrlen
== BSPI_ADDRLEN_3BYTES
)
372 /* default mode, does not need flex_cmd */
378 bpc
|= 0x00010100; /* address and mode are 2-bit */
379 bpp
= BSPI_BPP_MODE_SELECT_MASK
;
385 bpc
|= 0x00020200; /* address and mode are 4-bit */
386 bpp
|= BSPI_BPP_MODE_SELECT_MASK
;
393 bcm_qspi_bspi_set_xfer_params(qspi
, command
, bpp
, bpc
, flex_mode
);
398 static int bcm_qspi_bspi_set_override(struct bcm_qspi
*qspi
,
399 const struct spi_mem_op
*op
, int hp
)
401 int width
= op
->data
.buswidth
? op
->data
.buswidth
: SPI_NBITS_SINGLE
;
402 int addrlen
= op
->addr
.nbytes
;
403 u32 data
= bcm_qspi_read(qspi
, BSPI
, BSPI_STRAP_OVERRIDE_CTRL
);
405 dev_dbg(&qspi
->pdev
->dev
, "set override mode w %x addrlen %x hp %d\n",
409 case SPI_NBITS_SINGLE
:
410 /* clear quad/dual mode */
411 data
&= ~(BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD
|
412 BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL
);
415 /* clear dual mode and set quad mode */
416 data
&= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL
;
417 data
|= BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD
;
420 /* clear quad mode set dual mode */
421 data
&= ~BSPI_STRAP_OVERRIDE_CTRL_DATA_QUAD
;
422 data
|= BSPI_STRAP_OVERRIDE_CTRL_DATA_DUAL
;
428 if (addrlen
== BSPI_ADDRLEN_4BYTES
)
430 data
|= BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE
;
432 /* clear 4 byte mode */
433 data
&= ~BSPI_STRAP_OVERRIDE_CTRL_ADDR_4BYTE
;
435 /* set the override mode */
436 data
|= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE
;
437 bcm_qspi_write(qspi
, BSPI
, BSPI_STRAP_OVERRIDE_CTRL
, data
);
438 bcm_qspi_bspi_set_xfer_params(qspi
, op
->cmd
.opcode
, 0, 0, 0);
443 static int bcm_qspi_bspi_set_mode(struct bcm_qspi
*qspi
,
444 const struct spi_mem_op
*op
, int hp
)
447 int width
= op
->data
.buswidth
? op
->data
.buswidth
: SPI_NBITS_SINGLE
;
448 int addrlen
= op
->addr
.nbytes
;
451 qspi
->xfer_mode
.flex_mode
= true;
453 if (!bcm_qspi_bspi_ver_three(qspi
)) {
456 val
= bcm_qspi_read(qspi
, BSPI
, BSPI_STRAP_OVERRIDE_CTRL
);
457 mask
= BSPI_STRAP_OVERRIDE_CTRL_OVERRIDE
;
458 if (val
& mask
|| qspi
->s3_strap_override_ctrl
& mask
) {
459 qspi
->xfer_mode
.flex_mode
= false;
460 bcm_qspi_write(qspi
, BSPI
, BSPI_FLEX_MODE_ENABLE
, 0);
461 error
= bcm_qspi_bspi_set_override(qspi
, op
, hp
);
465 if (qspi
->xfer_mode
.flex_mode
)
466 error
= bcm_qspi_bspi_set_flex_mode(qspi
, op
, hp
);
469 dev_warn(&qspi
->pdev
->dev
,
470 "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n",
472 } else if (qspi
->xfer_mode
.width
!= width
||
473 qspi
->xfer_mode
.addrlen
!= addrlen
||
474 qspi
->xfer_mode
.hp
!= hp
) {
475 qspi
->xfer_mode
.width
= width
;
476 qspi
->xfer_mode
.addrlen
= addrlen
;
477 qspi
->xfer_mode
.hp
= hp
;
478 dev_dbg(&qspi
->pdev
->dev
,
479 "cs:%d %d-lane output, %d-byte address%s\n",
481 qspi
->xfer_mode
.width
,
482 qspi
->xfer_mode
.addrlen
,
483 qspi
->xfer_mode
.hp
!= -1 ? ", hp mode" : "");
489 static void bcm_qspi_enable_bspi(struct bcm_qspi
*qspi
)
494 qspi
->bspi_enabled
= 1;
495 if ((bcm_qspi_read(qspi
, BSPI
, BSPI_MAST_N_BOOT_CTRL
) & 1) == 0)
498 bcm_qspi_bspi_flush_prefetch_buffers(qspi
);
500 bcm_qspi_write(qspi
, BSPI
, BSPI_MAST_N_BOOT_CTRL
, 0);
504 static void bcm_qspi_disable_bspi(struct bcm_qspi
*qspi
)
509 qspi
->bspi_enabled
= 0;
510 if ((bcm_qspi_read(qspi
, BSPI
, BSPI_MAST_N_BOOT_CTRL
) & 1))
513 bcm_qspi_bspi_busy_poll(qspi
);
514 bcm_qspi_write(qspi
, BSPI
, BSPI_MAST_N_BOOT_CTRL
, 1);
518 static void bcm_qspi_chip_select(struct bcm_qspi
*qspi
, int cs
)
523 if (qspi
->base
[CHIP_SELECT
]) {
524 rd
= bcm_qspi_read(qspi
, CHIP_SELECT
, 0);
525 wr
= (rd
& ~0xff) | (1 << cs
);
528 bcm_qspi_write(qspi
, CHIP_SELECT
, 0, wr
);
529 usleep_range(10, 20);
532 dev_dbg(&qspi
->pdev
->dev
, "using cs:%d\n", cs
);
537 static void bcm_qspi_hw_set_parms(struct bcm_qspi
*qspi
,
538 const struct bcm_qspi_parms
*xp
)
543 spbr
= qspi
->base_clk
/ (2 * xp
->speed_hz
);
545 spcr
= clamp_val(spbr
, QSPI_SPBR_MIN
, QSPI_SPBR_MAX
);
546 bcm_qspi_write(qspi
, MSPI
, MSPI_SPCR0_LSB
, spcr
);
548 spcr
= MSPI_MASTER_BIT
;
549 /* for 16 bit the data should be zero */
550 if (xp
->bits_per_word
!= 16)
551 spcr
|= xp
->bits_per_word
<< 2;
552 spcr
|= xp
->mode
& 3;
553 bcm_qspi_write(qspi
, MSPI
, MSPI_SPCR0_MSB
, spcr
);
555 qspi
->last_parms
= *xp
;
558 static void bcm_qspi_update_parms(struct bcm_qspi
*qspi
,
559 struct spi_device
*spi
,
560 struct spi_transfer
*trans
)
562 struct bcm_qspi_parms xp
;
564 xp
.speed_hz
= trans
->speed_hz
;
565 xp
.bits_per_word
= trans
->bits_per_word
;
568 bcm_qspi_hw_set_parms(qspi
, &xp
);
571 static int bcm_qspi_setup(struct spi_device
*spi
)
573 struct bcm_qspi_parms
*xp
;
575 if (spi
->bits_per_word
> 16)
578 xp
= spi_get_ctldata(spi
);
580 xp
= kzalloc(sizeof(*xp
), GFP_KERNEL
);
583 spi_set_ctldata(spi
, xp
);
585 xp
->speed_hz
= spi
->max_speed_hz
;
586 xp
->mode
= spi
->mode
;
588 if (spi
->bits_per_word
)
589 xp
->bits_per_word
= spi
->bits_per_word
;
591 xp
->bits_per_word
= 8;
596 static bool bcm_qspi_mspi_transfer_is_last(struct bcm_qspi
*qspi
,
597 struct qspi_trans
*qt
)
599 if (qt
->mspi_last_trans
&&
600 spi_transfer_is_last(qspi
->master
, qt
->trans
))
606 static int update_qspi_trans_byte_count(struct bcm_qspi
*qspi
,
607 struct qspi_trans
*qt
, int flags
)
609 int ret
= TRANS_STATUS_BREAK_NONE
;
611 /* count the last transferred bytes */
612 if (qt
->trans
->bits_per_word
<= 8)
617 if (qt
->byte
>= qt
->trans
->len
) {
618 /* we're at the end of the spi_transfer */
619 /* in TX mode, need to pause for a delay or CS change */
620 if (qt
->trans
->delay_usecs
&&
621 (flags
& TRANS_STATUS_BREAK_DELAY
))
622 ret
|= TRANS_STATUS_BREAK_DELAY
;
623 if (qt
->trans
->cs_change
&&
624 (flags
& TRANS_STATUS_BREAK_CS_CHANGE
))
625 ret
|= TRANS_STATUS_BREAK_CS_CHANGE
;
629 dev_dbg(&qspi
->pdev
->dev
, "advance msg exit\n");
630 if (bcm_qspi_mspi_transfer_is_last(qspi
, qt
))
631 ret
= TRANS_STATUS_BREAK_EOM
;
633 ret
= TRANS_STATUS_BREAK_NO_BYTES
;
639 dev_dbg(&qspi
->pdev
->dev
, "trans %p len %d byte %d ret %x\n",
640 qt
->trans
, qt
->trans
? qt
->trans
->len
: 0, qt
->byte
, ret
);
644 static inline u8
read_rxram_slot_u8(struct bcm_qspi
*qspi
, int slot
)
646 u32 slot_offset
= MSPI_RXRAM
+ (slot
<< 3) + 0x4;
648 /* mask out reserved bits */
649 return bcm_qspi_read(qspi
, MSPI
, slot_offset
) & 0xff;
652 static inline u16
read_rxram_slot_u16(struct bcm_qspi
*qspi
, int slot
)
654 u32 reg_offset
= MSPI_RXRAM
;
655 u32 lsb_offset
= reg_offset
+ (slot
<< 3) + 0x4;
656 u32 msb_offset
= reg_offset
+ (slot
<< 3);
658 return (bcm_qspi_read(qspi
, MSPI
, lsb_offset
) & 0xff) |
659 ((bcm_qspi_read(qspi
, MSPI
, msb_offset
) & 0xff) << 8);
662 static void read_from_hw(struct bcm_qspi
*qspi
, int slots
)
664 struct qspi_trans tp
;
667 bcm_qspi_disable_bspi(qspi
);
669 if (slots
> MSPI_NUM_CDRAM
) {
670 /* should never happen */
671 dev_err(&qspi
->pdev
->dev
, "%s: too many slots!\n", __func__
);
675 tp
= qspi
->trans_pos
;
677 for (slot
= 0; slot
< slots
; slot
++) {
678 if (tp
.trans
->bits_per_word
<= 8) {
679 u8
*buf
= tp
.trans
->rx_buf
;
682 buf
[tp
.byte
] = read_rxram_slot_u8(qspi
, slot
);
683 dev_dbg(&qspi
->pdev
->dev
, "RD %02x\n",
684 buf
? buf
[tp
.byte
] : 0x0);
686 u16
*buf
= tp
.trans
->rx_buf
;
689 buf
[tp
.byte
/ 2] = read_rxram_slot_u16(qspi
,
691 dev_dbg(&qspi
->pdev
->dev
, "RD %04x\n",
692 buf
? buf
[tp
.byte
/ 2] : 0x0);
695 update_qspi_trans_byte_count(qspi
, &tp
,
696 TRANS_STATUS_BREAK_NONE
);
699 qspi
->trans_pos
= tp
;
702 static inline void write_txram_slot_u8(struct bcm_qspi
*qspi
, int slot
,
705 u32 reg_offset
= MSPI_TXRAM
+ (slot
<< 3);
707 /* mask out reserved bits */
708 bcm_qspi_write(qspi
, MSPI
, reg_offset
, val
);
711 static inline void write_txram_slot_u16(struct bcm_qspi
*qspi
, int slot
,
714 u32 reg_offset
= MSPI_TXRAM
;
715 u32 msb_offset
= reg_offset
+ (slot
<< 3);
716 u32 lsb_offset
= reg_offset
+ (slot
<< 3) + 0x4;
718 bcm_qspi_write(qspi
, MSPI
, msb_offset
, (val
>> 8));
719 bcm_qspi_write(qspi
, MSPI
, lsb_offset
, (val
& 0xff));
722 static inline u32
read_cdram_slot(struct bcm_qspi
*qspi
, int slot
)
724 return bcm_qspi_read(qspi
, MSPI
, MSPI_CDRAM
+ (slot
<< 2));
727 static inline void write_cdram_slot(struct bcm_qspi
*qspi
, int slot
, u32 val
)
729 bcm_qspi_write(qspi
, MSPI
, (MSPI_CDRAM
+ (slot
<< 2)), val
);
732 /* Return number of slots written */
733 static int write_to_hw(struct bcm_qspi
*qspi
, struct spi_device
*spi
)
735 struct qspi_trans tp
;
736 int slot
= 0, tstatus
= 0;
739 bcm_qspi_disable_bspi(qspi
);
740 tp
= qspi
->trans_pos
;
741 bcm_qspi_update_parms(qspi
, spi
, tp
.trans
);
743 /* Run until end of transfer or reached the max data */
744 while (!tstatus
&& slot
< MSPI_NUM_CDRAM
) {
745 if (tp
.trans
->bits_per_word
<= 8) {
746 const u8
*buf
= tp
.trans
->tx_buf
;
747 u8 val
= buf
? buf
[tp
.byte
] : 0x00;
749 write_txram_slot_u8(qspi
, slot
, val
);
750 dev_dbg(&qspi
->pdev
->dev
, "WR %02x\n", val
);
752 const u16
*buf
= tp
.trans
->tx_buf
;
753 u16 val
= buf
? buf
[tp
.byte
/ 2] : 0x0000;
755 write_txram_slot_u16(qspi
, slot
, val
);
756 dev_dbg(&qspi
->pdev
->dev
, "WR %04x\n", val
);
758 mspi_cdram
= MSPI_CDRAM_CONT_BIT
;
763 mspi_cdram
|= (~(1 << spi
->chip_select
) &
766 mspi_cdram
|= ((tp
.trans
->bits_per_word
<= 8) ? 0 :
767 MSPI_CDRAM_BITSE_BIT
);
769 write_cdram_slot(qspi
, slot
, mspi_cdram
);
771 tstatus
= update_qspi_trans_byte_count(qspi
, &tp
,
772 TRANS_STATUS_BREAK_TX
);
777 dev_err(&qspi
->pdev
->dev
, "%s: no data to send?", __func__
);
781 dev_dbg(&qspi
->pdev
->dev
, "submitting %d slots\n", slot
);
782 bcm_qspi_write(qspi
, MSPI
, MSPI_NEWQP
, 0);
783 bcm_qspi_write(qspi
, MSPI
, MSPI_ENDQP
, slot
- 1);
785 if (tstatus
& TRANS_STATUS_BREAK_DESELECT
) {
786 mspi_cdram
= read_cdram_slot(qspi
, slot
- 1) &
787 ~MSPI_CDRAM_CONT_BIT
;
788 write_cdram_slot(qspi
, slot
- 1, mspi_cdram
);
792 bcm_qspi_write(qspi
, MSPI
, MSPI_WRITE_LOCK
, 1);
794 /* Must flush previous writes before starting MSPI operation */
796 /* Set cont | spe | spifie */
797 bcm_qspi_write(qspi
, MSPI
, MSPI_SPCR2
, 0xe0);
803 static int bcm_qspi_bspi_exec_mem_op(struct spi_device
*spi
,
804 const struct spi_mem_op
*op
)
806 struct bcm_qspi
*qspi
= spi_master_get_devdata(spi
->master
);
807 u32 addr
= 0, len
, rdlen
, len_words
, from
= 0;
809 unsigned long timeo
= msecs_to_jiffies(100);
810 struct bcm_qspi_soc_intc
*soc_intc
= qspi
->soc_intc
;
812 if (bcm_qspi_bspi_ver_three(qspi
))
813 if (op
->addr
.nbytes
== BSPI_ADDRLEN_4BYTES
)
817 bcm_qspi_chip_select(qspi
, spi
->chip_select
);
818 bcm_qspi_write(qspi
, MSPI
, MSPI_WRITE_LOCK
, 0);
821 * when using flex mode we need to send
822 * the upper address byte to bspi
824 if (bcm_qspi_bspi_ver_three(qspi
) == false) {
825 addr
= from
& 0xff000000;
826 bcm_qspi_write(qspi
, BSPI
,
827 BSPI_BSPI_FLASH_UPPER_ADDR_BYTE
, addr
);
830 if (!qspi
->xfer_mode
.flex_mode
)
833 addr
= from
& 0x00ffffff;
835 if (bcm_qspi_bspi_ver_three(qspi
) == true)
836 addr
= (addr
+ 0xc00000) & 0xffffff;
839 * read into the entire buffer by breaking the reads
840 * into RAF buffer read lengths
842 len
= op
->data
.nbytes
;
843 qspi
->bspi_rf_op_idx
= 0;
846 if (len
> BSPI_READ_LENGTH
)
847 rdlen
= BSPI_READ_LENGTH
;
851 reinit_completion(&qspi
->bspi_done
);
852 bcm_qspi_enable_bspi(qspi
);
853 len_words
= (rdlen
+ 3) >> 2;
854 qspi
->bspi_rf_op
= op
;
855 qspi
->bspi_rf_op_status
= 0;
856 qspi
->bspi_rf_op_len
= rdlen
;
857 dev_dbg(&qspi
->pdev
->dev
,
858 "bspi xfr addr 0x%x len 0x%x", addr
, rdlen
);
859 bcm_qspi_write(qspi
, BSPI
, BSPI_RAF_START_ADDR
, addr
);
860 bcm_qspi_write(qspi
, BSPI
, BSPI_RAF_NUM_WORDS
, len_words
);
861 bcm_qspi_write(qspi
, BSPI
, BSPI_RAF_WATERMARK
, 0);
862 if (qspi
->soc_intc
) {
864 * clear soc MSPI and BSPI interrupts and enable
867 soc_intc
->bcm_qspi_int_ack(soc_intc
, MSPI_BSPI_DONE
);
868 soc_intc
->bcm_qspi_int_set(soc_intc
, BSPI_DONE
, true);
871 /* Must flush previous writes before starting BSPI operation */
873 bcm_qspi_bspi_lr_start(qspi
);
874 if (!wait_for_completion_timeout(&qspi
->bspi_done
, timeo
)) {
875 dev_err(&qspi
->pdev
->dev
, "timeout waiting for BSPI\n");
880 /* set msg return length */
888 static int bcm_qspi_transfer_one(struct spi_master
*master
,
889 struct spi_device
*spi
,
890 struct spi_transfer
*trans
)
892 struct bcm_qspi
*qspi
= spi_master_get_devdata(master
);
894 unsigned long timeo
= msecs_to_jiffies(100);
896 bcm_qspi_chip_select(qspi
, spi
->chip_select
);
897 qspi
->trans_pos
.trans
= trans
;
898 qspi
->trans_pos
.byte
= 0;
900 while (qspi
->trans_pos
.byte
< trans
->len
) {
901 reinit_completion(&qspi
->mspi_done
);
903 slots
= write_to_hw(qspi
, spi
);
904 if (!wait_for_completion_timeout(&qspi
->mspi_done
, timeo
)) {
905 dev_err(&qspi
->pdev
->dev
, "timeout waiting for MSPI\n");
909 read_from_hw(qspi
, slots
);
915 static int bcm_qspi_mspi_exec_mem_op(struct spi_device
*spi
,
916 const struct spi_mem_op
*op
)
918 struct spi_master
*master
= spi
->master
;
919 struct bcm_qspi
*qspi
= spi_master_get_devdata(master
);
920 struct spi_transfer t
[2];
924 memset(cmd
, 0, sizeof(cmd
));
925 memset(t
, 0, sizeof(t
));
928 /* opcode is in cmd[0] */
929 cmd
[0] = op
->cmd
.opcode
;
930 for (i
= 0; i
< op
->addr
.nbytes
; i
++)
931 cmd
[1 + i
] = op
->addr
.val
>> (8 * (op
->addr
.nbytes
- i
- 1));
934 t
[0].len
= op
->addr
.nbytes
+ op
->dummy
.nbytes
+ 1;
935 t
[0].bits_per_word
= spi
->bits_per_word
;
936 t
[0].tx_nbits
= op
->cmd
.buswidth
;
937 /* lets mspi know that this is not last transfer */
938 qspi
->trans_pos
.mspi_last_trans
= false;
939 ret
= bcm_qspi_transfer_one(master
, spi
, &t
[0]);
942 qspi
->trans_pos
.mspi_last_trans
= true;
945 t
[1].rx_buf
= op
->data
.buf
.in
;
946 t
[1].len
= op
->data
.nbytes
;
947 t
[1].rx_nbits
= op
->data
.buswidth
;
948 t
[1].bits_per_word
= spi
->bits_per_word
;
949 ret
= bcm_qspi_transfer_one(master
, spi
, &t
[1]);
955 static int bcm_qspi_exec_mem_op(struct spi_mem
*mem
,
956 const struct spi_mem_op
*op
)
958 struct spi_device
*spi
= mem
->spi
;
959 struct bcm_qspi
*qspi
= spi_master_get_devdata(spi
->master
);
961 bool mspi_read
= false;
965 if (!op
->data
.nbytes
|| !op
->addr
.nbytes
|| op
->addr
.nbytes
> 4 ||
966 op
->data
.dir
!= SPI_MEM_DATA_IN
)
969 buf
= op
->data
.buf
.in
;
971 len
= op
->data
.nbytes
;
973 if (bcm_qspi_bspi_ver_three(qspi
) == true) {
975 * The address coming into this function is a raw flash offset.
976 * But for BSPI <= V3, we need to convert it to a remapped BSPI
977 * address. If it crosses a 4MB boundary, just revert back to
980 addr
= (addr
+ 0xc00000) & 0xffffff;
982 if ((~ADDR_4MB_MASK
& addr
) ^
983 (~ADDR_4MB_MASK
& (addr
+ len
- 1)))
987 /* non-aligned and very short transfers are handled by MSPI */
988 if (!IS_ALIGNED((uintptr_t)addr
, 4) || !IS_ALIGNED((uintptr_t)buf
, 4) ||
993 return bcm_qspi_mspi_exec_mem_op(spi
, op
);
995 ret
= bcm_qspi_bspi_set_mode(qspi
, op
, 0);
998 ret
= bcm_qspi_bspi_exec_mem_op(spi
, op
);
/* spi_master->cleanup: free the per-device data allocated in setup(). */
static void bcm_qspi_cleanup(struct spi_device *spi)
{
	struct bcm_qspi_parms *xp = spi_get_ctldata(spi);

	kfree(xp);
}
1010 static irqreturn_t
bcm_qspi_mspi_l2_isr(int irq
, void *dev_id
)
1012 struct bcm_qspi_dev_id
*qspi_dev_id
= dev_id
;
1013 struct bcm_qspi
*qspi
= qspi_dev_id
->dev
;
1014 u32 status
= bcm_qspi_read(qspi
, MSPI
, MSPI_MSPI_STATUS
);
1016 if (status
& MSPI_MSPI_STATUS_SPIF
) {
1017 struct bcm_qspi_soc_intc
*soc_intc
= qspi
->soc_intc
;
1018 /* clear interrupt */
1019 status
&= ~MSPI_MSPI_STATUS_SPIF
;
1020 bcm_qspi_write(qspi
, MSPI
, MSPI_MSPI_STATUS
, status
);
1022 soc_intc
->bcm_qspi_int_ack(soc_intc
, MSPI_DONE
);
1023 complete(&qspi
->mspi_done
);
1030 static irqreturn_t
bcm_qspi_bspi_lr_l2_isr(int irq
, void *dev_id
)
1032 struct bcm_qspi_dev_id
*qspi_dev_id
= dev_id
;
1033 struct bcm_qspi
*qspi
= qspi_dev_id
->dev
;
1034 struct bcm_qspi_soc_intc
*soc_intc
= qspi
->soc_intc
;
1035 u32 status
= qspi_dev_id
->irqp
->mask
;
1037 if (qspi
->bspi_enabled
&& qspi
->bspi_rf_op
) {
1038 bcm_qspi_bspi_lr_data_read(qspi
);
1039 if (qspi
->bspi_rf_op_len
== 0) {
1040 qspi
->bspi_rf_op
= NULL
;
1041 if (qspi
->soc_intc
) {
1042 /* disable soc BSPI interrupt */
1043 soc_intc
->bcm_qspi_int_set(soc_intc
, BSPI_DONE
,
1046 status
= INTR_BSPI_LR_SESSION_DONE_MASK
;
1049 if (qspi
->bspi_rf_op_status
)
1050 bcm_qspi_bspi_lr_clear(qspi
);
1052 bcm_qspi_bspi_flush_prefetch_buffers(qspi
);
1056 /* clear soc BSPI interrupt */
1057 soc_intc
->bcm_qspi_int_ack(soc_intc
, BSPI_DONE
);
1060 status
&= INTR_BSPI_LR_SESSION_DONE_MASK
;
1061 if (qspi
->bspi_enabled
&& status
&& qspi
->bspi_rf_op_len
== 0)
1062 complete(&qspi
->bspi_done
);
1067 static irqreturn_t
bcm_qspi_bspi_lr_err_l2_isr(int irq
, void *dev_id
)
1069 struct bcm_qspi_dev_id
*qspi_dev_id
= dev_id
;
1070 struct bcm_qspi
*qspi
= qspi_dev_id
->dev
;
1071 struct bcm_qspi_soc_intc
*soc_intc
= qspi
->soc_intc
;
1073 dev_err(&qspi
->pdev
->dev
, "BSPI INT error\n");
1074 qspi
->bspi_rf_op_status
= -EIO
;
1076 /* clear soc interrupt */
1077 soc_intc
->bcm_qspi_int_ack(soc_intc
, BSPI_ERR
);
1079 complete(&qspi
->bspi_done
);
1083 static irqreturn_t
bcm_qspi_l1_isr(int irq
, void *dev_id
)
1085 struct bcm_qspi_dev_id
*qspi_dev_id
= dev_id
;
1086 struct bcm_qspi
*qspi
= qspi_dev_id
->dev
;
1087 struct bcm_qspi_soc_intc
*soc_intc
= qspi
->soc_intc
;
1088 irqreturn_t ret
= IRQ_NONE
;
1091 u32 status
= soc_intc
->bcm_qspi_get_int_status(soc_intc
);
1093 if (status
& MSPI_DONE
)
1094 ret
= bcm_qspi_mspi_l2_isr(irq
, dev_id
);
1095 else if (status
& BSPI_DONE
)
1096 ret
= bcm_qspi_bspi_lr_l2_isr(irq
, dev_id
);
1097 else if (status
& BSPI_ERR
)
1098 ret
= bcm_qspi_bspi_lr_err_l2_isr(irq
, dev_id
);
1104 static const struct bcm_qspi_irq qspi_irq_tab
[] = {
1106 .irq_name
= "spi_lr_fullness_reached",
1107 .irq_handler
= bcm_qspi_bspi_lr_l2_isr
,
1108 .mask
= INTR_BSPI_LR_FULLNESS_REACHED_MASK
,
1111 .irq_name
= "spi_lr_session_aborted",
1112 .irq_handler
= bcm_qspi_bspi_lr_err_l2_isr
,
1113 .mask
= INTR_BSPI_LR_SESSION_ABORTED_MASK
,
1116 .irq_name
= "spi_lr_impatient",
1117 .irq_handler
= bcm_qspi_bspi_lr_err_l2_isr
,
1118 .mask
= INTR_BSPI_LR_IMPATIENT_MASK
,
1121 .irq_name
= "spi_lr_session_done",
1122 .irq_handler
= bcm_qspi_bspi_lr_l2_isr
,
1123 .mask
= INTR_BSPI_LR_SESSION_DONE_MASK
,
1125 #ifdef QSPI_INT_DEBUG
1126 /* this interrupt is for debug purposes only, dont request irq */
1128 .irq_name
= "spi_lr_overread",
1129 .irq_handler
= bcm_qspi_bspi_lr_err_l2_isr
,
1130 .mask
= INTR_BSPI_LR_OVERREAD_MASK
,
1134 .irq_name
= "mspi_done",
1135 .irq_handler
= bcm_qspi_mspi_l2_isr
,
1136 .mask
= INTR_MSPI_DONE_MASK
,
1139 .irq_name
= "mspi_halted",
1140 .irq_handler
= bcm_qspi_mspi_l2_isr
,
1141 .mask
= INTR_MSPI_HALTED_MASK
,
1144 /* single muxed L1 interrupt source */
1145 .irq_name
= "spi_l1_intr",
1146 .irq_handler
= bcm_qspi_l1_isr
,
1147 .irq_source
= MUXED_L1
,
1148 .mask
= QSPI_INTERRUPTS_ALL
,
1152 static void bcm_qspi_bspi_init(struct bcm_qspi
*qspi
)
1156 val
= bcm_qspi_read(qspi
, BSPI
, BSPI_REVISION_ID
);
1157 qspi
->bspi_maj_rev
= (val
>> 8) & 0xff;
1158 qspi
->bspi_min_rev
= val
& 0xff;
1159 if (!(bcm_qspi_bspi_ver_three(qspi
))) {
1160 /* Force mapping of BSPI address -> flash offset */
1161 bcm_qspi_write(qspi
, BSPI
, BSPI_BSPI_XOR_VALUE
, 0);
1162 bcm_qspi_write(qspi
, BSPI
, BSPI_BSPI_XOR_ENABLE
, 1);
1164 qspi
->bspi_enabled
= 1;
1165 bcm_qspi_disable_bspi(qspi
);
1166 bcm_qspi_write(qspi
, BSPI
, BSPI_B0_CTRL
, 0);
1167 bcm_qspi_write(qspi
, BSPI
, BSPI_B1_CTRL
, 0);
1170 static void bcm_qspi_hw_init(struct bcm_qspi
*qspi
)
1172 struct bcm_qspi_parms parms
;
1174 bcm_qspi_write(qspi
, MSPI
, MSPI_SPCR1_LSB
, 0);
1175 bcm_qspi_write(qspi
, MSPI
, MSPI_SPCR1_MSB
, 0);
1176 bcm_qspi_write(qspi
, MSPI
, MSPI_NEWQP
, 0);
1177 bcm_qspi_write(qspi
, MSPI
, MSPI_ENDQP
, 0);
1178 bcm_qspi_write(qspi
, MSPI
, MSPI_SPCR2
, 0x20);
1180 parms
.mode
= SPI_MODE_3
;
1181 parms
.bits_per_word
= 8;
1182 parms
.speed_hz
= qspi
->max_speed_hz
;
1183 bcm_qspi_hw_set_parms(qspi
, &parms
);
1186 bcm_qspi_bspi_init(qspi
);
1189 static void bcm_qspi_hw_uninit(struct bcm_qspi
*qspi
)
1191 bcm_qspi_write(qspi
, MSPI
, MSPI_SPCR2
, 0);
1193 bcm_qspi_write(qspi
, MSPI
, MSPI_WRITE_LOCK
, 0);
1197 static const struct spi_controller_mem_ops bcm_qspi_mem_ops
= {
1198 .exec_op
= bcm_qspi_exec_mem_op
,
1201 static const struct of_device_id bcm_qspi_of_match
[] = {
1202 { .compatible
= "brcm,spi-bcm-qspi" },
1205 MODULE_DEVICE_TABLE(of
, bcm_qspi_of_match
);
1207 int bcm_qspi_probe(struct platform_device
*pdev
,
1208 struct bcm_qspi_soc_intc
*soc_intc
)
1210 struct device
*dev
= &pdev
->dev
;
1211 struct bcm_qspi
*qspi
;
1212 struct spi_master
*master
;
1213 struct resource
*res
;
1214 int irq
, ret
= 0, num_ints
= 0;
1216 const char *name
= NULL
;
1217 int num_irqs
= ARRAY_SIZE(qspi_irq_tab
);
1219 /* We only support device-tree instantiation */
1223 if (!of_match_node(bcm_qspi_of_match
, dev
->of_node
))
1226 master
= spi_alloc_master(dev
, sizeof(struct bcm_qspi
));
1228 dev_err(dev
, "error allocating spi_master\n");
1232 qspi
= spi_master_get_devdata(master
);
1234 qspi
->trans_pos
.trans
= NULL
;
1235 qspi
->trans_pos
.byte
= 0;
1236 qspi
->trans_pos
.mspi_last_trans
= true;
1237 qspi
->master
= master
;
1239 master
->bus_num
= -1;
1240 master
->mode_bits
= SPI_CPHA
| SPI_CPOL
| SPI_RX_DUAL
| SPI_RX_QUAD
;
1241 master
->setup
= bcm_qspi_setup
;
1242 master
->transfer_one
= bcm_qspi_transfer_one
;
1243 master
->mem_ops
= &bcm_qspi_mem_ops
;
1244 master
->cleanup
= bcm_qspi_cleanup
;
1245 master
->dev
.of_node
= dev
->of_node
;
1246 master
->num_chipselect
= NUM_CHIPSELECT
;
1248 qspi
->big_endian
= of_device_is_big_endian(dev
->of_node
);
1250 if (!of_property_read_u32(dev
->of_node
, "num-cs", &val
))
1251 master
->num_chipselect
= val
;
1253 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "hif_mspi");
1255 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
,
1259 qspi
->base
[MSPI
] = devm_ioremap_resource(dev
, res
);
1260 if (IS_ERR(qspi
->base
[MSPI
])) {
1261 ret
= PTR_ERR(qspi
->base
[MSPI
]);
1262 goto qspi_resource_err
;
1265 goto qspi_resource_err
;
1268 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "bspi");
1270 qspi
->base
[BSPI
] = devm_ioremap_resource(dev
, res
);
1271 if (IS_ERR(qspi
->base
[BSPI
])) {
1272 ret
= PTR_ERR(qspi
->base
[BSPI
]);
1273 goto qspi_resource_err
;
1275 qspi
->bspi_mode
= true;
1277 qspi
->bspi_mode
= false;
1280 dev_info(dev
, "using %smspi mode\n", qspi
->bspi_mode
? "bspi-" : "");
1282 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "cs_reg");
1284 qspi
->base
[CHIP_SELECT
] = devm_ioremap_resource(dev
, res
);
1285 if (IS_ERR(qspi
->base
[CHIP_SELECT
])) {
1286 ret
= PTR_ERR(qspi
->base
[CHIP_SELECT
]);
1287 goto qspi_resource_err
;
1291 qspi
->dev_ids
= kcalloc(num_irqs
, sizeof(struct bcm_qspi_dev_id
),
1293 if (!qspi
->dev_ids
) {
1295 goto qspi_resource_err
;
1298 for (val
= 0; val
< num_irqs
; val
++) {
1300 name
= qspi_irq_tab
[val
].irq_name
;
1301 if (qspi_irq_tab
[val
].irq_source
== SINGLE_L2
) {
1302 /* get the l2 interrupts */
1303 irq
= platform_get_irq_byname(pdev
, name
);
1304 } else if (!num_ints
&& soc_intc
) {
1305 /* all mspi, bspi intrs muxed to one L1 intr */
1306 irq
= platform_get_irq(pdev
, 0);
1310 ret
= devm_request_irq(&pdev
->dev
, irq
,
1311 qspi_irq_tab
[val
].irq_handler
, 0,
1313 &qspi
->dev_ids
[val
]);
1315 dev_err(&pdev
->dev
, "IRQ %s not found\n", name
);
1316 goto qspi_probe_err
;
1319 qspi
->dev_ids
[val
].dev
= qspi
;
1320 qspi
->dev_ids
[val
].irqp
= &qspi_irq_tab
[val
];
1322 dev_dbg(&pdev
->dev
, "registered IRQ %s %d\n",
1323 qspi_irq_tab
[val
].irq_name
,
1329 dev_err(&pdev
->dev
, "no IRQs registered, cannot init driver\n");
1331 goto qspi_probe_err
;
1335 * Some SoCs integrate spi controller (e.g., its interrupt bits)
1339 qspi
->soc_intc
= soc_intc
;
1340 soc_intc
->bcm_qspi_int_set(soc_intc
, MSPI_DONE
, true);
1342 qspi
->soc_intc
= NULL
;
1345 qspi
->clk
= devm_clk_get(&pdev
->dev
, NULL
);
1346 if (IS_ERR(qspi
->clk
)) {
1347 dev_warn(dev
, "unable to get clock\n");
1348 ret
= PTR_ERR(qspi
->clk
);
1349 goto qspi_probe_err
;
1352 ret
= clk_prepare_enable(qspi
->clk
);
1354 dev_err(dev
, "failed to prepare clock\n");
1355 goto qspi_probe_err
;
1358 qspi
->base_clk
= clk_get_rate(qspi
->clk
);
1359 qspi
->max_speed_hz
= qspi
->base_clk
/ (QSPI_SPBR_MIN
* 2);
1361 bcm_qspi_hw_init(qspi
);
1362 init_completion(&qspi
->mspi_done
);
1363 init_completion(&qspi
->bspi_done
);
1366 platform_set_drvdata(pdev
, qspi
);
1368 qspi
->xfer_mode
.width
= -1;
1369 qspi
->xfer_mode
.addrlen
= -1;
1370 qspi
->xfer_mode
.hp
= -1;
1372 ret
= devm_spi_register_master(&pdev
->dev
, master
);
1374 dev_err(dev
, "can't register master\n");
1381 bcm_qspi_hw_uninit(qspi
);
1382 clk_disable_unprepare(qspi
->clk
);
1384 kfree(qspi
->dev_ids
);
1386 spi_master_put(master
);
1389 /* probe function to be called by SoC specific platform driver probe */
1390 EXPORT_SYMBOL_GPL(bcm_qspi_probe
);
1392 int bcm_qspi_remove(struct platform_device
*pdev
)
1394 struct bcm_qspi
*qspi
= platform_get_drvdata(pdev
);
1396 bcm_qspi_hw_uninit(qspi
);
1397 clk_disable_unprepare(qspi
->clk
);
1398 kfree(qspi
->dev_ids
);
1399 spi_unregister_master(qspi
->master
);
1403 /* function to be called by SoC specific platform driver remove() */
1404 EXPORT_SYMBOL_GPL(bcm_qspi_remove
);
1406 static int __maybe_unused
bcm_qspi_suspend(struct device
*dev
)
1408 struct bcm_qspi
*qspi
= dev_get_drvdata(dev
);
1410 /* store the override strap value */
1411 if (!bcm_qspi_bspi_ver_three(qspi
))
1412 qspi
->s3_strap_override_ctrl
=
1413 bcm_qspi_read(qspi
, BSPI
, BSPI_STRAP_OVERRIDE_CTRL
);
1415 spi_master_suspend(qspi
->master
);
1416 clk_disable(qspi
->clk
);
1417 bcm_qspi_hw_uninit(qspi
);
1422 static int __maybe_unused
bcm_qspi_resume(struct device
*dev
)
1424 struct bcm_qspi
*qspi
= dev_get_drvdata(dev
);
1427 bcm_qspi_hw_init(qspi
);
1428 bcm_qspi_chip_select(qspi
, qspi
->curr_cs
);
1430 /* enable MSPI interrupt */
1431 qspi
->soc_intc
->bcm_qspi_int_set(qspi
->soc_intc
, MSPI_DONE
,
1434 ret
= clk_enable(qspi
->clk
);
1436 spi_master_resume(qspi
->master
);
1441 SIMPLE_DEV_PM_OPS(bcm_qspi_pm_ops
, bcm_qspi_suspend
, bcm_qspi_resume
);
1443 /* pm_ops to be called by SoC specific platform driver */
1444 EXPORT_SYMBOL_GPL(bcm_qspi_pm_ops
);
1446 MODULE_AUTHOR("Kamal Dasu");
1447 MODULE_DESCRIPTION("Broadcom QSPI driver");
1448 MODULE_LICENSE("GPL v2");
1449 MODULE_ALIAS("platform:" DRIVER_NAME
);