1 // SPDX-License-Identifier: GPL-2.0-only
3 * Intel PCH/PCU SPI flash driver.
5 * Copyright (C) 2016 - 2022, Intel Corporation
6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
9 #include <linux/iopoll.h>
10 #include <linux/module.h>
12 #include <linux/mtd/partitions.h>
13 #include <linux/mtd/spi-nor.h>
15 #include <linux/spi/flash.h>
16 #include <linux/spi/spi.h>
17 #include <linux/spi/spi-mem.h>
19 #include "spi-intel.h"
21 /* Offsets are from @ispi->base */
24 #define HSFSTS_CTL 0x04
25 #define HSFSTS_CTL_FSMIE BIT(31)
26 #define HSFSTS_CTL_FDBC_SHIFT 24
27 #define HSFSTS_CTL_FDBC_MASK (0x3f << HSFSTS_CTL_FDBC_SHIFT)
29 #define HSFSTS_CTL_FCYCLE_SHIFT 17
30 #define HSFSTS_CTL_FCYCLE_MASK (0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
31 /* HW sequencer opcodes */
32 #define HSFSTS_CTL_FCYCLE_READ (0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
33 #define HSFSTS_CTL_FCYCLE_WRITE (0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
34 #define HSFSTS_CTL_FCYCLE_ERASE (0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
35 #define HSFSTS_CTL_FCYCLE_ERASE_64K (0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
36 #define HSFSTS_CTL_FCYCLE_RDSFDP (0x05 << HSFSTS_CTL_FCYCLE_SHIFT)
37 #define HSFSTS_CTL_FCYCLE_RDID (0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
38 #define HSFSTS_CTL_FCYCLE_WRSR (0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
39 #define HSFSTS_CTL_FCYCLE_RDSR (0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
41 #define HSFSTS_CTL_FGO BIT(16)
42 #define HSFSTS_CTL_FLOCKDN BIT(15)
43 #define HSFSTS_CTL_FDV BIT(14)
44 #define HSFSTS_CTL_SCIP BIT(5)
45 #define HSFSTS_CTL_AEL BIT(2)
46 #define HSFSTS_CTL_FCERR BIT(1)
47 #define HSFSTS_CTL_FDONE BIT(0)
51 #define FDATA(n) (0x10 + ((n) * 4))
55 #define FREG(n) (0x54 + ((n) * 4))
56 #define FREG_BASE_MASK GENMASK(14, 0)
57 #define FREG_LIMIT_SHIFT 16
58 #define FREG_LIMIT_MASK GENMASK(30, 16)
60 /* Offset is from @ispi->pregs */
61 #define PR(n) ((n) * 4)
62 #define PR_WPE BIT(31)
63 #define PR_LIMIT_SHIFT 16
64 #define PR_LIMIT_MASK GENMASK(30, 16)
65 #define PR_RPE BIT(15)
66 #define PR_BASE_MASK GENMASK(14, 0)
68 /* Offsets are from @ispi->sregs */
69 #define SSFSTS_CTL 0x00
70 #define SSFSTS_CTL_FSMIE BIT(23)
71 #define SSFSTS_CTL_DS BIT(22)
72 #define SSFSTS_CTL_DBC_SHIFT 16
73 #define SSFSTS_CTL_SPOP BIT(11)
74 #define SSFSTS_CTL_ACS BIT(10)
75 #define SSFSTS_CTL_SCGO BIT(9)
76 #define SSFSTS_CTL_COP_SHIFT 12
77 #define SSFSTS_CTL_FRS BIT(7)
78 #define SSFSTS_CTL_DOFRS BIT(6)
79 #define SSFSTS_CTL_AEL BIT(4)
80 #define SSFSTS_CTL_FCERR BIT(3)
81 #define SSFSTS_CTL_FDONE BIT(2)
82 #define SSFSTS_CTL_SCIP BIT(0)
84 #define PREOP_OPTYPE 0x04
88 #define OPTYPE_READ_NO_ADDR 0
89 #define OPTYPE_WRITE_NO_ADDR 1
90 #define OPTYPE_READ_WITH_ADDR 2
91 #define OPTYPE_WRITE_WITH_ADDR 3
95 #define BYT_SSFSTS_CTL 0x90
96 #define BYT_FREG_NUM 5
100 #define LPT_SSFSTS_CTL 0x90
101 #define LPT_FREG_NUM 5
105 #define BXT_SSFSTS_CTL 0xa0
106 #define BXT_FREG_NUM 12
110 #define CNL_FREG_NUM 6
115 #define ERASE_OPCODE_SHIFT 8
116 #define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT)
117 #define ERASE_64K_OPCODE_SHIFT 16
118 #define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT)
120 /* Flash descriptor fields */
121 #define FLVALSIG_MAGIC 0x0ff0a55a
122 #define FLMAP0_NC_MASK GENMASK(9, 8)
123 #define FLMAP0_NC_SHIFT 8
124 #define FLMAP0_FCBA_MASK GENMASK(7, 0)
126 #define FLCOMP_C0DEN_MASK GENMASK(3, 0)
127 #define FLCOMP_C0DEN_512K 0x00
128 #define FLCOMP_C0DEN_1M 0x01
129 #define FLCOMP_C0DEN_2M 0x02
130 #define FLCOMP_C0DEN_4M 0x03
131 #define FLCOMP_C0DEN_8M 0x04
132 #define FLCOMP_C0DEN_16M 0x05
133 #define FLCOMP_C0DEN_32M 0x06
134 #define FLCOMP_C0DEN_64M 0x07
136 #define INTEL_SPI_TIMEOUT 5000 /* ms */
137 #define INTEL_SPI_FIFO_SZ 64
140 * struct intel_spi - Driver private data
141 * @dev: Device pointer
142 * @info: Pointer to board specific info
143 * @base: Beginning of MMIO space
144 * @pregs: Start of protection registers
145 * @sregs: Start of software sequencer registers
146 * @host: Pointer to the SPI controller structure
147 * @nregions: Maximum number of regions
148 * @pr_num: Maximum number of protected range registers
149 * @chip0_size: Size of the first flash chip in bytes
150 * @locked: Is SPI setting locked
151 * @protected: Whether the regions are write protected
152 * @bios_locked: Is BIOS region locked
153 * @swseq_reg: Use SW sequencer in register reads/writes
154 * @swseq_erase: Use SW sequencer in erase operation
155 * @atomic_preopcode: Holds preopcode when atomic sequence is requested
156 * @opcodes: Opcodes which are supported. This are programmed by BIOS
157 * before it locks down the controller.
158 * @mem_ops: Pointer to SPI MEM ops supported by the controller
162 const struct intel_spi_boardinfo
*info
;
166 struct spi_controller
*host
;
177 const struct intel_spi_mem_op
*mem_ops
;
180 struct intel_spi_mem_op
{
181 struct spi_mem_op mem_op
;
183 int (*exec_op
)(struct intel_spi
*ispi
,
184 const struct spi_mem
*mem
,
185 const struct intel_spi_mem_op
*iop
,
186 const struct spi_mem_op
*op
);
189 static bool writeable
;
190 module_param(writeable
, bool, 0);
191 MODULE_PARM_DESC(writeable
, "Enable write access to SPI flash chip (default=0)");
193 static void intel_spi_dump_regs(struct intel_spi
*ispi
)
198 dev_dbg(ispi
->dev
, "BFPREG=0x%08x\n", readl(ispi
->base
+ BFPREG
));
200 value
= readl(ispi
->base
+ HSFSTS_CTL
);
201 dev_dbg(ispi
->dev
, "HSFSTS_CTL=0x%08x\n", value
);
202 if (value
& HSFSTS_CTL_FLOCKDN
)
203 dev_dbg(ispi
->dev
, "-> Locked\n");
205 dev_dbg(ispi
->dev
, "FADDR=0x%08x\n", readl(ispi
->base
+ FADDR
));
206 dev_dbg(ispi
->dev
, "DLOCK=0x%08x\n", readl(ispi
->base
+ DLOCK
));
208 for (i
= 0; i
< 16; i
++)
209 dev_dbg(ispi
->dev
, "FDATA(%d)=0x%08x\n",
210 i
, readl(ispi
->base
+ FDATA(i
)));
212 dev_dbg(ispi
->dev
, "FRACC=0x%08x\n", readl(ispi
->base
+ FRACC
));
214 for (i
= 0; i
< ispi
->nregions
; i
++)
215 dev_dbg(ispi
->dev
, "FREG(%d)=0x%08x\n", i
,
216 readl(ispi
->base
+ FREG(i
)));
217 for (i
= 0; i
< ispi
->pr_num
; i
++)
218 dev_dbg(ispi
->dev
, "PR(%d)=0x%08x\n", i
,
219 readl(ispi
->pregs
+ PR(i
)));
222 value
= readl(ispi
->sregs
+ SSFSTS_CTL
);
223 dev_dbg(ispi
->dev
, "SSFSTS_CTL=0x%08x\n", value
);
224 dev_dbg(ispi
->dev
, "PREOP_OPTYPE=0x%08x\n",
225 readl(ispi
->sregs
+ PREOP_OPTYPE
));
226 dev_dbg(ispi
->dev
, "OPMENU0=0x%08x\n",
227 readl(ispi
->sregs
+ OPMENU0
));
228 dev_dbg(ispi
->dev
, "OPMENU1=0x%08x\n",
229 readl(ispi
->sregs
+ OPMENU1
));
232 dev_dbg(ispi
->dev
, "LVSCC=0x%08x\n", readl(ispi
->base
+ LVSCC
));
233 dev_dbg(ispi
->dev
, "UVSCC=0x%08x\n", readl(ispi
->base
+ UVSCC
));
235 dev_dbg(ispi
->dev
, "Protected regions:\n");
236 for (i
= 0; i
< ispi
->pr_num
; i
++) {
239 value
= readl(ispi
->pregs
+ PR(i
));
240 if (!(value
& (PR_WPE
| PR_RPE
)))
243 limit
= (value
& PR_LIMIT_MASK
) >> PR_LIMIT_SHIFT
;
244 base
= value
& PR_BASE_MASK
;
246 dev_dbg(ispi
->dev
, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
247 i
, base
<< 12, (limit
<< 12) | 0xfff,
248 value
& PR_WPE
? 'W' : '.', value
& PR_RPE
? 'R' : '.');
251 dev_dbg(ispi
->dev
, "Flash regions:\n");
252 for (i
= 0; i
< ispi
->nregions
; i
++) {
253 u32 region
, base
, limit
;
255 region
= readl(ispi
->base
+ FREG(i
));
256 base
= region
& FREG_BASE_MASK
;
257 limit
= (region
& FREG_LIMIT_MASK
) >> FREG_LIMIT_SHIFT
;
259 if (base
>= limit
|| (i
> 0 && limit
== 0))
260 dev_dbg(ispi
->dev
, " %02d disabled\n", i
);
262 dev_dbg(ispi
->dev
, " %02d base: 0x%08x limit: 0x%08x\n",
263 i
, base
<< 12, (limit
<< 12) | 0xfff);
266 dev_dbg(ispi
->dev
, "Using %cW sequencer for register access\n",
267 ispi
->swseq_reg
? 'S' : 'H');
268 dev_dbg(ispi
->dev
, "Using %cW sequencer for erase operation\n",
269 ispi
->swseq_erase
? 'S' : 'H');
272 /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */
273 static int intel_spi_read_block(struct intel_spi
*ispi
, void *buf
, size_t size
)
278 if (size
> INTEL_SPI_FIFO_SZ
)
282 bytes
= min_t(size_t, size
, 4);
283 memcpy_fromio(buf
, ispi
->base
+ FDATA(i
), bytes
);
292 /* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */
293 static int intel_spi_write_block(struct intel_spi
*ispi
, const void *buf
,
299 if (size
> INTEL_SPI_FIFO_SZ
)
303 bytes
= min_t(size_t, size
, 4);
304 memcpy_toio(ispi
->base
+ FDATA(i
), buf
, bytes
);
313 static int intel_spi_wait_hw_busy(struct intel_spi
*ispi
)
317 return readl_poll_timeout(ispi
->base
+ HSFSTS_CTL
, val
,
318 !(val
& HSFSTS_CTL_SCIP
), 0,
319 INTEL_SPI_TIMEOUT
* 1000);
322 static int intel_spi_wait_sw_busy(struct intel_spi
*ispi
)
326 return readl_poll_timeout(ispi
->sregs
+ SSFSTS_CTL
, val
,
327 !(val
& SSFSTS_CTL_SCIP
), 0,
328 INTEL_SPI_TIMEOUT
* 1000);
331 static bool intel_spi_set_writeable(struct intel_spi
*ispi
)
333 if (!ispi
->info
->set_writeable
)
336 return ispi
->info
->set_writeable(ispi
->base
, ispi
->info
->data
);
339 static int intel_spi_opcode_index(struct intel_spi
*ispi
, u8 opcode
, int optype
)
345 for (i
= 0; i
< ARRAY_SIZE(ispi
->opcodes
); i
++)
346 if (ispi
->opcodes
[i
] == opcode
)
352 /* The lock is off, so just use index 0 */
353 writel(opcode
, ispi
->sregs
+ OPMENU0
);
354 preop
= readw(ispi
->sregs
+ PREOP_OPTYPE
);
355 writel(optype
<< 16 | preop
, ispi
->sregs
+ PREOP_OPTYPE
);
360 static int intel_spi_hw_cycle(struct intel_spi
*ispi
,
361 const struct intel_spi_mem_op
*iop
, size_t len
)
366 if (!iop
->replacement_op
)
369 val
= readl(ispi
->base
+ HSFSTS_CTL
);
370 val
&= ~(HSFSTS_CTL_FCYCLE_MASK
| HSFSTS_CTL_FDBC_MASK
);
371 val
|= (len
- 1) << HSFSTS_CTL_FDBC_SHIFT
;
372 val
|= HSFSTS_CTL_FCERR
| HSFSTS_CTL_FDONE
;
373 val
|= HSFSTS_CTL_FGO
;
374 val
|= iop
->replacement_op
;
375 writel(val
, ispi
->base
+ HSFSTS_CTL
);
377 ret
= intel_spi_wait_hw_busy(ispi
);
381 status
= readl(ispi
->base
+ HSFSTS_CTL
);
382 if (status
& HSFSTS_CTL_FCERR
)
384 else if (status
& HSFSTS_CTL_AEL
)
390 static int intel_spi_sw_cycle(struct intel_spi
*ispi
, u8 opcode
, size_t len
,
397 ret
= intel_spi_opcode_index(ispi
, opcode
, optype
);
402 * Always clear it after each SW sequencer operation regardless
403 * of whether it is successful or not.
405 atomic_preopcode
= ispi
->atomic_preopcode
;
406 ispi
->atomic_preopcode
= 0;
408 /* Only mark 'Data Cycle' bit when there is data to be transferred */
410 val
= ((len
- 1) << SSFSTS_CTL_DBC_SHIFT
) | SSFSTS_CTL_DS
;
411 val
|= ret
<< SSFSTS_CTL_COP_SHIFT
;
412 val
|= SSFSTS_CTL_FCERR
| SSFSTS_CTL_FDONE
;
413 val
|= SSFSTS_CTL_SCGO
;
414 if (atomic_preopcode
) {
418 case OPTYPE_WRITE_NO_ADDR
:
419 case OPTYPE_WRITE_WITH_ADDR
:
420 /* Pick matching preopcode for the atomic sequence */
421 preop
= readw(ispi
->sregs
+ PREOP_OPTYPE
);
422 if ((preop
& 0xff) == atomic_preopcode
)
424 else if ((preop
>> 8) == atomic_preopcode
)
425 val
|= SSFSTS_CTL_SPOP
;
429 /* Enable atomic sequence */
430 val
|= SSFSTS_CTL_ACS
;
437 writel(val
, ispi
->sregs
+ SSFSTS_CTL
);
439 ret
= intel_spi_wait_sw_busy(ispi
);
443 status
= readl(ispi
->sregs
+ SSFSTS_CTL
);
444 if (status
& SSFSTS_CTL_FCERR
)
446 else if (status
& SSFSTS_CTL_AEL
)
452 static u32
intel_spi_chip_addr(const struct intel_spi
*ispi
,
453 const struct spi_mem
*mem
)
455 /* Pick up the correct start address */
458 return (spi_get_chipselect(mem
->spi
, 0) == 1) ? ispi
->chip0_size
: 0;
461 static int intel_spi_read_reg(struct intel_spi
*ispi
, const struct spi_mem
*mem
,
462 const struct intel_spi_mem_op
*iop
,
463 const struct spi_mem_op
*op
)
465 u32 addr
= intel_spi_chip_addr(ispi
, mem
) + op
->addr
.val
;
466 size_t nbytes
= op
->data
.nbytes
;
467 u8 opcode
= op
->cmd
.opcode
;
470 writel(addr
, ispi
->base
+ FADDR
);
473 ret
= intel_spi_sw_cycle(ispi
, opcode
, nbytes
,
474 OPTYPE_READ_NO_ADDR
);
476 ret
= intel_spi_hw_cycle(ispi
, iop
, nbytes
);
481 return intel_spi_read_block(ispi
, op
->data
.buf
.in
, nbytes
);
484 static int intel_spi_write_reg(struct intel_spi
*ispi
, const struct spi_mem
*mem
,
485 const struct intel_spi_mem_op
*iop
,
486 const struct spi_mem_op
*op
)
488 u32 addr
= intel_spi_chip_addr(ispi
, mem
) + op
->addr
.val
;
489 size_t nbytes
= op
->data
.nbytes
;
490 u8 opcode
= op
->cmd
.opcode
;
494 * This is handled with atomic operation and preop code in Intel
495 * controller so we only verify that it is available. If the
496 * controller is not locked, program the opcode to the PREOP
497 * register for later use.
499 * When hardware sequencer is used there is no need to program
500 * any opcodes (it handles them automatically as part of a command).
502 if (opcode
== SPINOR_OP_WREN
) {
505 if (!ispi
->swseq_reg
)
508 preop
= readw(ispi
->sregs
+ PREOP_OPTYPE
);
509 if ((preop
& 0xff) != opcode
&& (preop
>> 8) != opcode
) {
512 writel(opcode
, ispi
->sregs
+ PREOP_OPTYPE
);
516 * This enables atomic sequence on next SW sycle. Will
517 * be cleared after next operation.
519 ispi
->atomic_preopcode
= opcode
;
524 * We hope that HW sequencer will do the right thing automatically and
525 * with the SW sequencer we cannot use preopcode anyway, so just ignore
526 * the Write Disable operation and pretend it was completed
529 if (opcode
== SPINOR_OP_WRDI
)
532 writel(addr
, ispi
->base
+ FADDR
);
534 /* Write the value beforehand */
535 ret
= intel_spi_write_block(ispi
, op
->data
.buf
.out
, nbytes
);
540 return intel_spi_sw_cycle(ispi
, opcode
, nbytes
,
541 OPTYPE_WRITE_NO_ADDR
);
542 return intel_spi_hw_cycle(ispi
, iop
, nbytes
);
545 static int intel_spi_read(struct intel_spi
*ispi
, const struct spi_mem
*mem
,
546 const struct intel_spi_mem_op
*iop
,
547 const struct spi_mem_op
*op
)
549 u32 addr
= intel_spi_chip_addr(ispi
, mem
) + op
->addr
.val
;
550 size_t block_size
, nbytes
= op
->data
.nbytes
;
551 void *read_buf
= op
->data
.buf
.in
;
556 * Atomic sequence is not expected with HW sequencer reads. Make
557 * sure it is cleared regardless.
559 if (WARN_ON_ONCE(ispi
->atomic_preopcode
))
560 ispi
->atomic_preopcode
= 0;
563 block_size
= min_t(size_t, nbytes
, INTEL_SPI_FIFO_SZ
);
565 /* Read cannot cross 4K boundary */
566 block_size
= min_t(loff_t
, addr
+ block_size
,
567 round_up(addr
+ 1, SZ_4K
)) - addr
;
569 writel(addr
, ispi
->base
+ FADDR
);
571 val
= readl(ispi
->base
+ HSFSTS_CTL
);
572 val
&= ~(HSFSTS_CTL_FDBC_MASK
| HSFSTS_CTL_FCYCLE_MASK
);
573 val
|= HSFSTS_CTL_AEL
| HSFSTS_CTL_FCERR
| HSFSTS_CTL_FDONE
;
574 val
|= (block_size
- 1) << HSFSTS_CTL_FDBC_SHIFT
;
575 val
|= HSFSTS_CTL_FCYCLE_READ
;
576 val
|= HSFSTS_CTL_FGO
;
577 writel(val
, ispi
->base
+ HSFSTS_CTL
);
579 ret
= intel_spi_wait_hw_busy(ispi
);
583 status
= readl(ispi
->base
+ HSFSTS_CTL
);
584 if (status
& HSFSTS_CTL_FCERR
)
586 else if (status
& HSFSTS_CTL_AEL
)
590 dev_err(ispi
->dev
, "read error: %x: %#x\n", addr
, status
);
594 ret
= intel_spi_read_block(ispi
, read_buf
, block_size
);
598 nbytes
-= block_size
;
600 read_buf
+= block_size
;
606 static int intel_spi_write(struct intel_spi
*ispi
, const struct spi_mem
*mem
,
607 const struct intel_spi_mem_op
*iop
,
608 const struct spi_mem_op
*op
)
610 u32 addr
= intel_spi_chip_addr(ispi
, mem
) + op
->addr
.val
;
611 size_t block_size
, nbytes
= op
->data
.nbytes
;
612 const void *write_buf
= op
->data
.buf
.out
;
616 /* Not needed with HW sequencer write, make sure it is cleared */
617 ispi
->atomic_preopcode
= 0;
620 block_size
= min_t(size_t, nbytes
, INTEL_SPI_FIFO_SZ
);
622 /* Write cannot cross 4K boundary */
623 block_size
= min_t(loff_t
, addr
+ block_size
,
624 round_up(addr
+ 1, SZ_4K
)) - addr
;
626 writel(addr
, ispi
->base
+ FADDR
);
628 val
= readl(ispi
->base
+ HSFSTS_CTL
);
629 val
&= ~(HSFSTS_CTL_FDBC_MASK
| HSFSTS_CTL_FCYCLE_MASK
);
630 val
|= HSFSTS_CTL_AEL
| HSFSTS_CTL_FCERR
| HSFSTS_CTL_FDONE
;
631 val
|= (block_size
- 1) << HSFSTS_CTL_FDBC_SHIFT
;
632 val
|= HSFSTS_CTL_FCYCLE_WRITE
;
634 ret
= intel_spi_write_block(ispi
, write_buf
, block_size
);
636 dev_err(ispi
->dev
, "failed to write block\n");
640 /* Start the write now */
641 val
|= HSFSTS_CTL_FGO
;
642 writel(val
, ispi
->base
+ HSFSTS_CTL
);
644 ret
= intel_spi_wait_hw_busy(ispi
);
646 dev_err(ispi
->dev
, "timeout\n");
650 status
= readl(ispi
->base
+ HSFSTS_CTL
);
651 if (status
& HSFSTS_CTL_FCERR
)
653 else if (status
& HSFSTS_CTL_AEL
)
657 dev_err(ispi
->dev
, "write error: %x: %#x\n", addr
, status
);
661 nbytes
-= block_size
;
663 write_buf
+= block_size
;
669 static int intel_spi_erase(struct intel_spi
*ispi
, const struct spi_mem
*mem
,
670 const struct intel_spi_mem_op
*iop
,
671 const struct spi_mem_op
*op
)
673 u32 addr
= intel_spi_chip_addr(ispi
, mem
) + op
->addr
.val
;
674 u8 opcode
= op
->cmd
.opcode
;
678 writel(addr
, ispi
->base
+ FADDR
);
680 if (ispi
->swseq_erase
)
681 return intel_spi_sw_cycle(ispi
, opcode
, 0,
682 OPTYPE_WRITE_WITH_ADDR
);
684 /* Not needed with HW sequencer erase, make sure it is cleared */
685 ispi
->atomic_preopcode
= 0;
687 val
= readl(ispi
->base
+ HSFSTS_CTL
);
688 val
&= ~(HSFSTS_CTL_FDBC_MASK
| HSFSTS_CTL_FCYCLE_MASK
);
689 val
|= HSFSTS_CTL_AEL
| HSFSTS_CTL_FCERR
| HSFSTS_CTL_FDONE
;
690 val
|= HSFSTS_CTL_FGO
;
691 val
|= iop
->replacement_op
;
692 writel(val
, ispi
->base
+ HSFSTS_CTL
);
694 ret
= intel_spi_wait_hw_busy(ispi
);
698 status
= readl(ispi
->base
+ HSFSTS_CTL
);
699 if (status
& HSFSTS_CTL_FCERR
)
701 if (status
& HSFSTS_CTL_AEL
)
707 static int intel_spi_adjust_op_size(struct spi_mem
*mem
, struct spi_mem_op
*op
)
709 op
->data
.nbytes
= clamp_val(op
->data
.nbytes
, 0, INTEL_SPI_FIFO_SZ
);
713 static bool intel_spi_cmp_mem_op(const struct intel_spi_mem_op
*iop
,
714 const struct spi_mem_op
*op
)
716 if (iop
->mem_op
.cmd
.nbytes
!= op
->cmd
.nbytes
||
717 iop
->mem_op
.cmd
.buswidth
!= op
->cmd
.buswidth
||
718 iop
->mem_op
.cmd
.dtr
!= op
->cmd
.dtr
)
721 if (iop
->mem_op
.addr
.nbytes
!= op
->addr
.nbytes
||
722 iop
->mem_op
.addr
.dtr
!= op
->addr
.dtr
)
725 if (iop
->mem_op
.data
.dir
!= op
->data
.dir
||
726 iop
->mem_op
.data
.dtr
!= op
->data
.dtr
)
729 if (iop
->mem_op
.data
.dir
!= SPI_MEM_NO_DATA
) {
730 if (iop
->mem_op
.data
.buswidth
!= op
->data
.buswidth
)
737 static const struct intel_spi_mem_op
*
738 intel_spi_match_mem_op(struct intel_spi
*ispi
, const struct spi_mem_op
*op
)
740 const struct intel_spi_mem_op
*iop
;
742 for (iop
= ispi
->mem_ops
; iop
->mem_op
.cmd
.opcode
; iop
++) {
743 if (iop
->mem_op
.cmd
.opcode
== op
->cmd
.opcode
&&
744 intel_spi_cmp_mem_op(iop
, op
))
751 static bool intel_spi_supports_mem_op(struct spi_mem
*mem
,
752 const struct spi_mem_op
*op
)
754 struct intel_spi
*ispi
= spi_controller_get_devdata(mem
->spi
->controller
);
755 const struct intel_spi_mem_op
*iop
;
757 iop
= intel_spi_match_mem_op(ispi
, op
);
759 dev_dbg(ispi
->dev
, "%#x not supported\n", op
->cmd
.opcode
);
764 * For software sequencer check that the opcode is actually
765 * present in the opmenu if it is locked.
767 if (ispi
->swseq_reg
&& ispi
->locked
) {
770 /* Check if it is in the locked opcodes list */
771 for (i
= 0; i
< ARRAY_SIZE(ispi
->opcodes
); i
++) {
772 if (ispi
->opcodes
[i
] == op
->cmd
.opcode
)
776 dev_dbg(ispi
->dev
, "%#x not supported\n", op
->cmd
.opcode
);
783 static int intel_spi_exec_mem_op(struct spi_mem
*mem
, const struct spi_mem_op
*op
)
785 struct intel_spi
*ispi
= spi_controller_get_devdata(mem
->spi
->controller
);
786 const struct intel_spi_mem_op
*iop
;
788 iop
= intel_spi_match_mem_op(ispi
, op
);
792 return iop
->exec_op(ispi
, mem
, iop
, op
);
795 static const char *intel_spi_get_name(struct spi_mem
*mem
)
797 const struct intel_spi
*ispi
= spi_controller_get_devdata(mem
->spi
->controller
);
800 * Return name of the flash controller device to be compatible
801 * with the MTD version.
803 return dev_name(ispi
->dev
);
806 static int intel_spi_dirmap_create(struct spi_mem_dirmap_desc
*desc
)
808 struct intel_spi
*ispi
= spi_controller_get_devdata(desc
->mem
->spi
->controller
);
809 const struct intel_spi_mem_op
*iop
;
811 iop
= intel_spi_match_mem_op(ispi
, &desc
->info
.op_tmpl
);
815 desc
->priv
= (void *)iop
;
819 static ssize_t
intel_spi_dirmap_read(struct spi_mem_dirmap_desc
*desc
, u64 offs
,
820 size_t len
, void *buf
)
822 struct intel_spi
*ispi
= spi_controller_get_devdata(desc
->mem
->spi
->controller
);
823 const struct intel_spi_mem_op
*iop
= desc
->priv
;
824 struct spi_mem_op op
= desc
->info
.op_tmpl
;
827 /* Fill in the gaps */
829 op
.data
.nbytes
= len
;
830 op
.data
.buf
.in
= buf
;
832 ret
= iop
->exec_op(ispi
, desc
->mem
, iop
, &op
);
833 return ret
? ret
: len
;
836 static ssize_t
intel_spi_dirmap_write(struct spi_mem_dirmap_desc
*desc
, u64 offs
,
837 size_t len
, const void *buf
)
839 struct intel_spi
*ispi
= spi_controller_get_devdata(desc
->mem
->spi
->controller
);
840 const struct intel_spi_mem_op
*iop
= desc
->priv
;
841 struct spi_mem_op op
= desc
->info
.op_tmpl
;
845 op
.data
.nbytes
= len
;
846 op
.data
.buf
.out
= buf
;
848 ret
= iop
->exec_op(ispi
, desc
->mem
, iop
, &op
);
849 return ret
? ret
: len
;
852 static const struct spi_controller_mem_ops intel_spi_mem_ops
= {
853 .adjust_op_size
= intel_spi_adjust_op_size
,
854 .supports_op
= intel_spi_supports_mem_op
,
855 .exec_op
= intel_spi_exec_mem_op
,
856 .get_name
= intel_spi_get_name
,
857 .dirmap_create
= intel_spi_dirmap_create
,
858 .dirmap_read
= intel_spi_dirmap_read
,
859 .dirmap_write
= intel_spi_dirmap_write
,
862 #define INTEL_SPI_OP_ADDR(__nbytes) \
864 .nbytes = __nbytes, \
867 #define INTEL_SPI_OP_NO_DATA \
869 .dir = SPI_MEM_NO_DATA, \
872 #define INTEL_SPI_OP_DATA_IN(__buswidth) \
874 .dir = SPI_MEM_DATA_IN, \
875 .buswidth = __buswidth, \
878 #define INTEL_SPI_OP_DATA_OUT(__buswidth) \
880 .dir = SPI_MEM_DATA_OUT, \
881 .buswidth = __buswidth, \
884 #define INTEL_SPI_MEM_OP(__cmd, __addr, __data, __exec_op) \
891 .exec_op = __exec_op, \
894 #define INTEL_SPI_MEM_OP_REPL(__cmd, __addr, __data, __exec_op, __repl) \
901 .exec_op = __exec_op, \
902 .replacement_op = __repl, \
906 * The controller handles pretty much everything internally based on the
907 * SFDP data but we want to make sure we only support the operations
908 * actually possible. Only check buswidth and transfer direction, the
909 * core validates data.
911 #define INTEL_SPI_GENERIC_OPS \
912 /* Status register operations */ \
913 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1), \
914 SPI_MEM_OP_NO_ADDR, \
915 INTEL_SPI_OP_DATA_IN(1), \
916 intel_spi_read_reg, \
917 HSFSTS_CTL_FCYCLE_RDID), \
918 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1), \
919 SPI_MEM_OP_NO_ADDR, \
920 INTEL_SPI_OP_DATA_IN(1), \
921 intel_spi_read_reg, \
922 HSFSTS_CTL_FCYCLE_RDSR), \
923 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1), \
924 SPI_MEM_OP_NO_ADDR, \
925 INTEL_SPI_OP_DATA_OUT(1), \
926 intel_spi_write_reg, \
927 HSFSTS_CTL_FCYCLE_WRSR), \
928 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_RDSFDP, 1), \
929 INTEL_SPI_OP_ADDR(3), \
930 INTEL_SPI_OP_DATA_IN(1), \
931 intel_spi_read_reg, \
932 HSFSTS_CTL_FCYCLE_RDSFDP), \
934 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
935 INTEL_SPI_OP_ADDR(3), \
936 INTEL_SPI_OP_DATA_IN(1), \
938 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
939 INTEL_SPI_OP_ADDR(3), \
940 INTEL_SPI_OP_DATA_IN(2), \
942 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
943 INTEL_SPI_OP_ADDR(3), \
944 INTEL_SPI_OP_DATA_IN(4), \
946 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
947 INTEL_SPI_OP_ADDR(4), \
948 INTEL_SPI_OP_DATA_IN(1), \
950 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
951 INTEL_SPI_OP_ADDR(4), \
952 INTEL_SPI_OP_DATA_IN(2), \
954 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ, 1), \
955 INTEL_SPI_OP_ADDR(4), \
956 INTEL_SPI_OP_DATA_IN(4), \
959 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
960 INTEL_SPI_OP_ADDR(3), \
961 INTEL_SPI_OP_DATA_IN(1), \
963 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
964 INTEL_SPI_OP_ADDR(3), \
965 INTEL_SPI_OP_DATA_IN(2), \
967 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
968 INTEL_SPI_OP_ADDR(3), \
969 INTEL_SPI_OP_DATA_IN(4), \
971 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
972 INTEL_SPI_OP_ADDR(4), \
973 INTEL_SPI_OP_DATA_IN(1), \
975 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
976 INTEL_SPI_OP_ADDR(4), \
977 INTEL_SPI_OP_DATA_IN(2), \
979 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST, 1), \
980 INTEL_SPI_OP_ADDR(4), \
981 INTEL_SPI_OP_DATA_IN(4), \
983 /* Read with 4-byte address opcode */ \
984 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
985 INTEL_SPI_OP_ADDR(4), \
986 INTEL_SPI_OP_DATA_IN(1), \
988 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
989 INTEL_SPI_OP_ADDR(4), \
990 INTEL_SPI_OP_DATA_IN(2), \
992 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_4B, 1), \
993 INTEL_SPI_OP_ADDR(4), \
994 INTEL_SPI_OP_DATA_IN(4), \
996 /* Fast read with 4-byte address opcode */ \
997 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
998 INTEL_SPI_OP_ADDR(4), \
999 INTEL_SPI_OP_DATA_IN(1), \
1001 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
1002 INTEL_SPI_OP_ADDR(4), \
1003 INTEL_SPI_OP_DATA_IN(2), \
1005 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ_FAST_4B, 1), \
1006 INTEL_SPI_OP_ADDR(4), \
1007 INTEL_SPI_OP_DATA_IN(4), \
1009 /* Write operations */ \
1010 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
1011 INTEL_SPI_OP_ADDR(3), \
1012 INTEL_SPI_OP_DATA_OUT(1), \
1014 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP, 1), \
1015 INTEL_SPI_OP_ADDR(4), \
1016 INTEL_SPI_OP_DATA_OUT(1), \
1018 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_PP_4B, 1), \
1019 INTEL_SPI_OP_ADDR(4), \
1020 INTEL_SPI_OP_DATA_OUT(1), \
1022 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1), \
1023 SPI_MEM_OP_NO_ADDR, \
1024 SPI_MEM_OP_NO_DATA, \
1025 intel_spi_write_reg), \
1026 INTEL_SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1), \
1027 SPI_MEM_OP_NO_ADDR, \
1028 SPI_MEM_OP_NO_DATA, \
1029 intel_spi_write_reg), \
1030 /* Erase operations */ \
1031 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
1032 INTEL_SPI_OP_ADDR(3), \
1033 SPI_MEM_OP_NO_DATA, \
1035 HSFSTS_CTL_FCYCLE_ERASE), \
1036 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K, 1), \
1037 INTEL_SPI_OP_ADDR(4), \
1038 SPI_MEM_OP_NO_DATA, \
1040 HSFSTS_CTL_FCYCLE_ERASE), \
1041 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_BE_4K_4B, 1), \
1042 INTEL_SPI_OP_ADDR(4), \
1043 SPI_MEM_OP_NO_DATA, \
1045 HSFSTS_CTL_FCYCLE_ERASE) \
1047 static const struct intel_spi_mem_op generic_mem_ops[] = {
1048 INTEL_SPI_GENERIC_OPS
,
1052 static const struct intel_spi_mem_op erase_64k_mem_ops
[] = {
1053 INTEL_SPI_GENERIC_OPS
,
1054 /* 64k sector erase operations */
1055 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE
, 1),
1056 INTEL_SPI_OP_ADDR(3),
1059 HSFSTS_CTL_FCYCLE_ERASE_64K
),
1060 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE
, 1),
1061 INTEL_SPI_OP_ADDR(4),
1064 HSFSTS_CTL_FCYCLE_ERASE_64K
),
1065 INTEL_SPI_MEM_OP_REPL(SPI_MEM_OP_CMD(SPINOR_OP_SE_4B
, 1),
1066 INTEL_SPI_OP_ADDR(4),
1069 HSFSTS_CTL_FCYCLE_ERASE_64K
),
1073 static int intel_spi_init(struct intel_spi
*ispi
)
1075 u32 opmenu0
, opmenu1
, lvscc
, uvscc
, val
;
1076 bool erase_64k
= false;
1079 switch (ispi
->info
->type
) {
1081 ispi
->sregs
= ispi
->base
+ BYT_SSFSTS_CTL
;
1082 ispi
->pregs
= ispi
->base
+ BYT_PR
;
1083 ispi
->nregions
= BYT_FREG_NUM
;
1084 ispi
->pr_num
= BYT_PR_NUM
;
1085 ispi
->swseq_reg
= true;
1089 ispi
->sregs
= ispi
->base
+ LPT_SSFSTS_CTL
;
1090 ispi
->pregs
= ispi
->base
+ LPT_PR
;
1091 ispi
->nregions
= LPT_FREG_NUM
;
1092 ispi
->pr_num
= LPT_PR_NUM
;
1093 ispi
->swseq_reg
= true;
1097 ispi
->sregs
= ispi
->base
+ BXT_SSFSTS_CTL
;
1098 ispi
->pregs
= ispi
->base
+ BXT_PR
;
1099 ispi
->nregions
= BXT_FREG_NUM
;
1100 ispi
->pr_num
= BXT_PR_NUM
;
1106 ispi
->pregs
= ispi
->base
+ CNL_PR
;
1107 ispi
->nregions
= CNL_FREG_NUM
;
1108 ispi
->pr_num
= CNL_PR_NUM
;
1116 ispi
->bios_locked
= true;
1117 /* Try to disable BIOS write protection if user asked to do so */
1119 if (intel_spi_set_writeable(ispi
))
1120 ispi
->bios_locked
= false;
1122 dev_warn(ispi
->dev
, "can't disable chip write protection\n");
1125 /* Disable #SMI generation from HW sequencer */
1126 val
= readl(ispi
->base
+ HSFSTS_CTL
);
1127 val
&= ~HSFSTS_CTL_FSMIE
;
1128 writel(val
, ispi
->base
+ HSFSTS_CTL
);
1131 * Determine whether erase operation should use HW or SW sequencer.
1133 * The HW sequencer has a predefined list of opcodes, with only the
1134 * erase opcode being programmable in LVSCC and UVSCC registers.
1135 * If these registers don't contain a valid erase opcode, erase
1136 * cannot be done using HW sequencer.
1138 lvscc
= readl(ispi
->base
+ LVSCC
);
1139 uvscc
= readl(ispi
->base
+ UVSCC
);
1140 if (!(lvscc
& ERASE_OPCODE_MASK
) || !(uvscc
& ERASE_OPCODE_MASK
))
1141 ispi
->swseq_erase
= true;
1142 /* SPI controller on Intel BXT supports 64K erase opcode */
1143 if (ispi
->info
->type
== INTEL_SPI_BXT
&& !ispi
->swseq_erase
)
1144 if (!(lvscc
& ERASE_64K_OPCODE_MASK
) ||
1145 !(uvscc
& ERASE_64K_OPCODE_MASK
))
1148 if (!ispi
->sregs
&& (ispi
->swseq_reg
|| ispi
->swseq_erase
)) {
1149 dev_err(ispi
->dev
, "software sequencer not supported, but required\n");
1154 * Some controllers can only do basic operations using hardware
1155 * sequencer. All other operations are supposed to be carried out
1156 * using software sequencer.
1158 if (ispi
->swseq_reg
) {
1159 /* Disable #SMI generation from SW sequencer */
1160 val
= readl(ispi
->sregs
+ SSFSTS_CTL
);
1161 val
&= ~SSFSTS_CTL_FSMIE
;
1162 writel(val
, ispi
->sregs
+ SSFSTS_CTL
);
1165 /* Check controller's lock status */
1166 val
= readl(ispi
->base
+ HSFSTS_CTL
);
1167 ispi
->locked
= !!(val
& HSFSTS_CTL_FLOCKDN
);
1169 if (ispi
->locked
&& ispi
->sregs
) {
1171 * BIOS programs allowed opcodes and then locks down the
1172 * register. So read back what opcodes it decided to support.
1173 * That's the set we are going to support as well.
1175 opmenu0
= readl(ispi
->sregs
+ OPMENU0
);
1176 opmenu1
= readl(ispi
->sregs
+ OPMENU1
);
1178 if (opmenu0
&& opmenu1
) {
1179 for (i
= 0; i
< ARRAY_SIZE(ispi
->opcodes
) / 2; i
++) {
1180 ispi
->opcodes
[i
] = opmenu0
>> i
* 8;
1181 ispi
->opcodes
[i
+ 4] = opmenu1
>> i
* 8;
1187 dev_dbg(ispi
->dev
, "Using erase_64k memory operations");
1188 ispi
->mem_ops
= erase_64k_mem_ops
;
1190 dev_dbg(ispi
->dev
, "Using generic memory operations");
1191 ispi
->mem_ops
= generic_mem_ops
;
1194 intel_spi_dump_regs(ispi
);
1198 static bool intel_spi_is_protected(const struct intel_spi
*ispi
,
1199 unsigned int base
, unsigned int limit
)
1203 for (i
= 0; i
< ispi
->pr_num
; i
++) {
1204 u32 pr_base
, pr_limit
, pr_value
;
1206 pr_value
= readl(ispi
->pregs
+ PR(i
));
1207 if (!(pr_value
& (PR_WPE
| PR_RPE
)))
1210 pr_limit
= (pr_value
& PR_LIMIT_MASK
) >> PR_LIMIT_SHIFT
;
1211 pr_base
= pr_value
& PR_BASE_MASK
;
1213 if (pr_base
>= base
&& pr_limit
<= limit
)
1221 * There will be a single partition holding all enabled flash regions. We
1224 static void intel_spi_fill_partition(struct intel_spi
*ispi
,
1225 struct mtd_partition
*part
)
1230 memset(part
, 0, sizeof(*part
));
1232 /* Start from the mandatory descriptor region */
1234 part
->name
= "BIOS";
1237 * Now try to find where this partition ends based on the flash
1240 for (i
= 1; i
< ispi
->nregions
; i
++) {
1241 u32 region
, base
, limit
;
1243 region
= readl(ispi
->base
+ FREG(i
));
1244 base
= region
& FREG_BASE_MASK
;
1245 limit
= (region
& FREG_LIMIT_MASK
) >> FREG_LIMIT_SHIFT
;
1247 if (base
>= limit
|| limit
== 0)
1251 * If any of the regions have protection bits set, make the
1252 * whole partition read-only to be on the safe side.
1254 * Also if the user did not ask the chip to be writeable
1257 if (!writeable
|| intel_spi_is_protected(ispi
, base
, limit
)) {
1258 part
->mask_flags
|= MTD_WRITEABLE
;
1259 ispi
->protected = true;
1262 end
= (limit
<< 12) + 4096;
1263 if (end
> part
->size
)
1268 * Regions can refer to the second chip too so in this case we
1269 * just make the BIOS partition to occupy the whole chip.
1271 if (ispi
->chip0_size
&& part
->size
> ispi
->chip0_size
)
1272 part
->size
= MTDPART_SIZ_FULL
;
1275 static int intel_spi_read_desc(struct intel_spi
*ispi
)
1277 struct spi_mem_op op
=
1278 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_READ
, 0),
1279 SPI_MEM_OP_ADDR(3, 0, 0),
1280 SPI_MEM_OP_NO_DUMMY
,
1281 SPI_MEM_OP_DATA_IN(0, NULL
, 0));
1282 u32 buf
[2], nc
, fcba
, flcomp
;
1286 op
.data
.buf
.in
= buf
;
1287 op
.data
.nbytes
= sizeof(buf
);
1289 ret
= intel_spi_read(ispi
, NULL
, NULL
, &op
);
1291 dev_warn(ispi
->dev
, "failed to read descriptor\n");
1295 dev_dbg(ispi
->dev
, "FLVALSIG=0x%08x\n", buf
[0]);
1296 dev_dbg(ispi
->dev
, "FLMAP0=0x%08x\n", buf
[1]);
1298 if (buf
[0] != FLVALSIG_MAGIC
) {
1299 dev_warn(ispi
->dev
, "descriptor signature not valid\n");
1303 fcba
= (buf
[1] & FLMAP0_FCBA_MASK
) << 4;
1304 dev_dbg(ispi
->dev
, "FCBA=%#x\n", fcba
);
1307 op
.data
.buf
.in
= &flcomp
;
1308 op
.data
.nbytes
= sizeof(flcomp
);
1310 ret
= intel_spi_read(ispi
, NULL
, NULL
, &op
);
1312 dev_warn(ispi
->dev
, "failed to read FLCOMP\n");
1316 dev_dbg(ispi
->dev
, "FLCOMP=0x%08x\n", flcomp
);
1318 switch (flcomp
& FLCOMP_C0DEN_MASK
) {
1319 case FLCOMP_C0DEN_512K
:
1320 ispi
->chip0_size
= SZ_512K
;
1322 case FLCOMP_C0DEN_1M
:
1323 ispi
->chip0_size
= SZ_1M
;
1325 case FLCOMP_C0DEN_2M
:
1326 ispi
->chip0_size
= SZ_2M
;
1328 case FLCOMP_C0DEN_4M
:
1329 ispi
->chip0_size
= SZ_4M
;
1331 case FLCOMP_C0DEN_8M
:
1332 ispi
->chip0_size
= SZ_8M
;
1334 case FLCOMP_C0DEN_16M
:
1335 ispi
->chip0_size
= SZ_16M
;
1337 case FLCOMP_C0DEN_32M
:
1338 ispi
->chip0_size
= SZ_32M
;
1340 case FLCOMP_C0DEN_64M
:
1341 ispi
->chip0_size
= SZ_64M
;
1347 dev_dbg(ispi
->dev
, "chip0 size %zd KB\n", ispi
->chip0_size
/ SZ_1K
);
1349 nc
= (buf
[1] & FLMAP0_NC_MASK
) >> FLMAP0_NC_SHIFT
;
1351 ispi
->host
->num_chipselect
= 1;
1353 ispi
->host
->num_chipselect
= 2;
1357 dev_dbg(ispi
->dev
, "%u flash components found\n",
1358 ispi
->host
->num_chipselect
);
1362 static int intel_spi_populate_chip(struct intel_spi
*ispi
)
1364 struct flash_platform_data
*pdata
;
1365 struct mtd_partition
*parts
;
1366 struct spi_board_info chip
;
1369 ret
= intel_spi_read_desc(ispi
);
1373 pdata
= devm_kzalloc(ispi
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1377 pdata
->nr_parts
= 1;
1378 pdata
->parts
= devm_kcalloc(ispi
->dev
, pdata
->nr_parts
,
1379 sizeof(*pdata
->parts
), GFP_KERNEL
);
1383 intel_spi_fill_partition(ispi
, pdata
->parts
);
1385 memset(&chip
, 0, sizeof(chip
));
1386 snprintf(chip
.modalias
, 8, "spi-nor");
1387 chip
.platform_data
= pdata
;
1389 if (!spi_new_device(ispi
->host
, &chip
))
1392 /* Add the second chip if present */
1393 if (ispi
->host
->num_chipselect
< 2)
1396 pdata
= devm_kzalloc(ispi
->dev
, sizeof(*pdata
), GFP_KERNEL
);
1400 pdata
->name
= devm_kasprintf(ispi
->dev
, GFP_KERNEL
, "%s-chip1",
1401 dev_name(ispi
->dev
));
1405 pdata
->nr_parts
= 1;
1406 parts
= devm_kcalloc(ispi
->dev
, pdata
->nr_parts
, sizeof(*parts
),
1411 parts
[0].size
= MTDPART_SIZ_FULL
;
1412 parts
[0].name
= "BIOS1";
1413 pdata
->parts
= parts
;
1415 chip
.platform_data
= pdata
;
1416 chip
.chip_select
= 1;
1418 if (!spi_new_device(ispi
->host
, &chip
))
1423 static ssize_t
intel_spi_protected_show(struct device
*dev
,
1424 struct device_attribute
*attr
, char *buf
)
1426 struct intel_spi
*ispi
= dev_get_drvdata(dev
);
1428 return sysfs_emit(buf
, "%d\n", ispi
->protected);
1430 static DEVICE_ATTR_ADMIN_RO(intel_spi_protected
);
1432 static ssize_t
intel_spi_locked_show(struct device
*dev
,
1433 struct device_attribute
*attr
, char *buf
)
1435 struct intel_spi
*ispi
= dev_get_drvdata(dev
);
1437 return sysfs_emit(buf
, "%d\n", ispi
->locked
);
1439 static DEVICE_ATTR_ADMIN_RO(intel_spi_locked
);
1441 static ssize_t
intel_spi_bios_locked_show(struct device
*dev
,
1442 struct device_attribute
*attr
, char *buf
)
1444 struct intel_spi
*ispi
= dev_get_drvdata(dev
);
1446 return sysfs_emit(buf
, "%d\n", ispi
->bios_locked
);
1448 static DEVICE_ATTR_ADMIN_RO(intel_spi_bios_locked
);
/* sysfs attributes exposing the protection/lock state of the controller */
static struct attribute *intel_spi_attrs[] = {
	&dev_attr_intel_spi_protected.attr,
	&dev_attr_intel_spi_locked.attr,
	&dev_attr_intel_spi_bios_locked.attr,
	NULL
};

static const struct attribute_group intel_spi_attr_group = {
	.attrs = intel_spi_attrs,
};

/* Exported so code outside this file can attach the attribute group */
const struct attribute_group *intel_spi_groups[] = {
	&intel_spi_attr_group,
	NULL
};
EXPORT_SYMBOL_GPL(intel_spi_groups);
1468 * intel_spi_probe() - Probe the Intel SPI flash controller
1469 * @dev: Pointer to the parent device
1470 * @mem: MMIO resource
1471 * @info: Platform specific information
1473 * Probes Intel SPI flash controller and creates the flash chip device.
1474 * Returns %0 on success and negative errno in case of failure.
1476 int intel_spi_probe(struct device
*dev
, struct resource
*mem
,
1477 const struct intel_spi_boardinfo
*info
)
1479 struct spi_controller
*host
;
1480 struct intel_spi
*ispi
;
1483 host
= devm_spi_alloc_host(dev
, sizeof(*ispi
));
1487 host
->mem_ops
= &intel_spi_mem_ops
;
1489 ispi
= spi_controller_get_devdata(host
);
1491 ispi
->base
= devm_ioremap_resource(dev
, mem
);
1492 if (IS_ERR(ispi
->base
))
1493 return PTR_ERR(ispi
->base
);
1499 ret
= intel_spi_init(ispi
);
1503 ret
= devm_spi_register_controller(dev
, host
);
1507 dev_set_drvdata(dev
, ispi
);
1508 return intel_spi_populate_chip(ispi
);
1510 EXPORT_SYMBOL_GPL(intel_spi_probe
);
/* Module metadata */
MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_LICENSE("GPL v2");