// SPDX-License-Identifier: GPL-2.0
/*
 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
 *
 * Copyright (C) 2005, Intec Automation Inc.
 * Copyright (C) 2014, Freescale Semiconductor, Inc.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
#include <linux/sched/task_stack.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>

/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now
 */
#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)

/*
 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
 * for larger flash
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)

#define SPI_NOR_MAX_ADDR_WIDTH	4

#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400

/**
 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
 *			   extension type.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 *
 * Right now, only "repeat" and "invert" are supported.
 *
 * Return: The opcode extension.
 */
static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
			      const struct spi_mem_op *op)
{
	switch (nor->cmd_ext_type) {
	case SPI_NOR_EXT_INVERT:
		return ~op->cmd.opcode;

	case SPI_NOR_EXT_REPEAT:
		return op->cmd.opcode;

	default:
		dev_err(nor->dev, "Unknown command extension type\n");
		return 0;
	}
}

/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 * @proto:	the protocol from which the properties need to be set.
 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * SPIMEM supports mixed DTR modes, but right now we can only
		 * have all phases either DTR or STR. IOW, SPIMEM can have
		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
		 * phases to either DTR or STR.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}
}

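/*
 * Illustrative example (added for clarity, not from the original source):
 * callers such as spi_nor_spimem_read_data() first convert dummy clock
 * cycles to bytes, e.g. (16 cycles * 8-bit dummy bus width) / 8 = 16 bytes,
 * and the DTR adjustment above then doubles that to 32 bytes because two
 * bytes are clocked per cycle in DTR mode.
 */
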
/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *                           transfer
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * If we have to use the bounce buffer, the data field in @op will be updated.
 *
 * Return: true if the bounce buffer is needed, false if not
 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
	/* op->data.buf.in occupies the same memory as op->data.buf.out */
	if (object_is_on_stack(op->data.buf.in) ||
	    !virt_addr_valid(op->data.buf.in)) {
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;
		op->data.buf.in = nor->bouncebuf;
		return true;
	}

	return false;
}

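/*
 * Illustrative note (added for clarity): a buffer that lives on the kernel
 * stack or outside the linear mapping cannot be handed to a DMA-capable SPI
 * controller, so the transfer is redirected through nor->bouncebuf and, for
 * reads, copied back into the caller's buffer afterwards.
 */
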
/**
 * spi_nor_spimem_exec_op() - execute a memory operation
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * Return: 0 on success, -error otherwise.
 */
static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
{
	int error;

	error = spi_mem_adjust_op_size(nor->spimem, op);
	if (error)
		return error;

	return spi_mem_exec_op(nor->spimem, op);
}

static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
					   u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
					    const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->write_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->write_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}

/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}

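/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * file): because a transfer may be shortened by the bounce buffer or by
 * spi_mem_adjust_op_size(), callers are expected to loop on the return
 * value, e.g.
 *
 *	while (len) {
 *		ssize_t ret = spi_nor_read_data(nor, from, len, buf);
 *
 *		if (ret < 0)
 *			return ret;
 *		from += ret;
 *		buf += ret;
 *		len -= ret;
 *	}
 */
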
/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_read_data(nor, from, len, buf);

	return nor->controller_ops->read(nor, from, len, buf);
}

/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}

/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_write_data(nor, to, len, buf);

	return nor->controller_ops->write(nor, to, len, buf);
}

/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

	return ret;
}

/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_disable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

	return ret;
}

/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}

/**
 * spi_nor_read_fsr() - Read the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'
 * @fsr:	pointer to a DMA-able buffer where the value of the
 *		Flag Status Register will be written. Should be at least 2
 *		bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, fsr, 0));

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading FSR\n", ret);

	return ret;
}

/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *		Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, cr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}

/**
 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
						  SPINOR_OP_EN4B :
						  SPINOR_OP_EX4B,
						  0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ? SPINOR_OP_EN4B :
								SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
 * flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spi_nor_write_ear() - Write Extended Address Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @ear:	value to write to the Extended Address Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
	int ret;

	nor->bouncebuf[0] = ear;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d writing EAR\n", ret);

	return ret;
}

/**
 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);

	return ret;
}

/**
 * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
 * the flash is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_xsr_ready(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	return !!(nor->bouncebuf[0] & XSR_RDY);
}

/**
 * spi_nor_clear_sr() - Clear the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 */
static void spi_nor_clear_sr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}

/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_sr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->flags & SNOR_F_USE_CLSR &&
	    nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
		if (nor->bouncebuf[0] & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		spi_nor_clear_sr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !(nor->bouncebuf[0] & SR_WIP);
}

/**
 * spi_nor_clear_fsr() - Clear the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 */
static void spi_nor_clear_fsr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}

/**
 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
 * ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_fsr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
		if (nor->bouncebuf[0] & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (nor->bouncebuf[0] & FSR_PT_ERR)
			dev_err(nor->dev,
				"Attempted to modify a protected sector.\n");

		spi_nor_clear_fsr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	return !!(nor->bouncebuf[0] & FSR_READY);
}

/**
 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int sr, fsr;

	if (nor->flags & SNOR_F_READY_XSR_RDY)
		sr = spi_nor_xsr_ready(nor);
	else
		sr = spi_nor_sr_ready(nor);
	if (sr < 0)
		return sr;

	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
	if (fsr < 0)
		return fsr;

	return sr && fsr;
}

/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}

/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}

/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to DMA-able buffer to write to the Status Register.
 * @len:	number of bytes to write to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(len, sr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
						       len);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
 * ensure that the byte written matches the received value.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;

	nor->bouncebuf[0] = sr1;

	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
	if (ret)
		return ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] != sr1) {
		dev_dbg(nor->dev, "SR1: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 matches the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Make sure we don't overwrite the contents of Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * We can safely assume that when the Quad Enable method is
		 * set, the value of the QE bit is one, as a consequence of the
		 * nor->params->quad_enable() call.
		 *
		 * We can safely assume that the Quad Enable bit is present in
		 * the Status Register 2 at BIT(1). According to the JESD216
		 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
		 * Write Status (01h) command is available just for the cases
		 * in which the QE bit is described in SR2 at BIT(1).
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register matches the received value, and that the 16-bit
 * Write did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
 * the byte written matches the received value without affecting other bits in
 * Status Registers 1 and 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	if (nor->flags & SNOR_F_HAS_16BIT_SR)
		return spi_nor_write_16bit_sr_and_check(nor, sr1);

	return spi_nor_write_sr1_and_check(nor, sr1);
}

/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, sr2, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
						       sr2, 1);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_read_sr2() - Read the Status Register 2 using the
 * SPINOR_OP_RDSR2 (3fh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer where the value of the
 *		Status Register 2 will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr2, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR2\n", ret);

	return ret;
}

/**
 * spi_nor_erase_chip() - Erase the entire flash memory.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_chip(struct spi_nor *nor)
{
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}

static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == opcode)
			return table[i][1];

	/* No conversion found, keep input op code. */
	return opcode;
}

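/*
 * Illustrative example (added for clarity): with the spi_nor_3to4_read table
 * below, spi_nor_convert_opcode(SPINOR_OP_READ_FAST, ...) maps the 3-byte
 * Fast Read opcode 0x0b to its 4-byte-address variant 0x0c, while an opcode
 * that has no table entry is returned unchanged.
 */
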
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}

static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}

static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}

static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
	return !!nor->params->erase_map.uniform_erase_type;
}

static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}

int spi_nor_lock_and_prep(struct spi_nor *nor)
{
	int ret = 0;

	mutex_lock(&nor->lock);

	if (nor->controller_ops && nor->controller_ops->prepare) {
		ret = nor->controller_ops->prepare(nor);
		if (ret) {
			mutex_unlock(&nor->lock);
			return ret;
		}
	}
	return ret;
}

void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
	mutex_unlock(&nor->lock);
}

static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
{
	if (!nor->params->convert_addr)
		return addr;

	return nor->params->convert_addr(nor, addr);
}

/*
 * Initiate the erasure of a single sector
 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	addr = spi_nor_convert_addr(nor, addr);

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
				   SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control.
	 */
	for (i = nor->addr_width - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_width);
}

/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}

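/*
 * Illustrative example (added for clarity): for a 64 KiB erase type,
 * size_shift is 16 and size_mask is 0xFFFF, so an address of 0x30000 divides
 * to 3 with a remainder of 0, while 0x31000 divides to 3 with a remainder of
 * 0x1000, i.e. the address is not aligned to the erase size.
 */
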
/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase. The region in
 *				    which the address fits is expected to be
 *				    provided.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		/* Alignment is not mandatory for overlaid regions */
		if (region->offset & SNOR_OVERLAID_REGION)
			return erase;

		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}

static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
	return region->offset & SNOR_LAST_REGION;
}

static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
	return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}

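/*
 * Illustrative note (added for clarity): the low bits of region->offset are
 * reused as flag storage (the erase type mask, SNOR_OVERLAID_REGION and
 * SNOR_LAST_REGION), which is why they must be masked off with
 * ~SNOR_ERASE_FLAGS_MASK before the offset can be used as an address.
 */
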
/**
 * spi_nor_region_next() - get the next spi nor region
 * @region:	pointer to a structure that describes a SPI NOR erase region
 *
 * Return: the next spi nor region or NULL if last region.
 */
struct spi_nor_erase_region *
spi_nor_region_next(struct spi_nor_erase_region *region)
{
	if (spi_nor_region_is_last(region))
		return NULL;
	region++;
	return region;
}

/**
 * spi_nor_find_erase_region() - find the region of the serial flash memory in
 *				 which the offset fits
 * @map:	the erase map of the SPI NOR
 * @addr:	offset in the serial flash memory
 *
 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}

/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	if (region->offset & SNOR_OVERLAID_REGION)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}

/**
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list:	list of erase commands
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}

/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		if (prev_erase != erase ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);

	return ret;
}

/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);

	return ret;
}

/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 addr, len;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	/* whole-chip erase? */
	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
		unsigned long timeout;

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto erase_err;

		ret = spi_nor_erase_chip(nor);
		if (ret)
			goto erase_err;

		/*
		 * Scale the timeout linearly with the size of the flash, with
		 * a minimum calibrated to an old 2MB flash. We could try to
		 * pull these from CFI/SFDP, but these values should be good
		 * enough for now.
		 */
		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
			      (unsigned long)(mtd->size / SZ_2M));
		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep(nor);

	return ret;
}

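/*
 * Illustrative example (added for clarity): with
 * CHIP_ERASE_2MB_READY_WAIT_JIFFIES at 40 seconds, a 16 MiB part scales the
 * full-chip erase timeout to 40 * (16 MiB / 2 MiB) = 320 seconds, while any
 * part of 2 MiB or less still gets the 40 second minimum from max().
 */
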
static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
{
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;

	if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
		return mask | SR_BP3_BIT6;

	if (nor->flags & SNOR_F_HAS_4BIT_BP)
		return mask | SR_BP3;

	return mask;
}

static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
{
	if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
		return SR_TB_BIT6;
	else
		return SR_TB_BIT5;
}

static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
{
	unsigned int bp_slots, bp_slots_needed;
	u8 mask = spi_nor_get_sr_bp_mask(nor);

	/* Reserved one for "protect none" and one for "protect all". */
	bp_slots = (1 << hweight8(mask)) - 2;
	bp_slots_needed = ilog2(nor->info->n_sectors);

	if (bp_slots_needed > bp_slots)
		return nor->info->sector_size <<
			(bp_slots_needed - bp_slots);
	else
		return nor->info->sector_size;
}

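/*
 * Illustrative example (added for clarity): a part with 256 sectors of
 * 64 KiB and the plain BP0-BP2 mask has bp_slots = (1 << 3) - 2 = 6 and
 * bp_slots_needed = ilog2(256) = 8, so the smallest protectable unit becomes
 * 64 KiB << (8 - 6) = 256 KiB rather than a single sector.
 */
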
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
					uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 bp, val = sr & mask;

	if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
		val = (val & ~SR_BP3_BIT6) | SR_BP3;

	bp = val >> SR_BP_SHIFT;

	if (!bp) {
		/* No protection */
		*ofs = 0;
		*len = 0;
		return;
	}

	min_prot_len = spi_nor_get_min_prot_length_sr(nor);
	*len = min_prot_len << (bp - 1);

	if (*len > mtd->size)
		*len = mtd->size;

	if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
		*ofs = 0;
	else
		*ofs = mtd->size - *len;
}

/*
 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
 * @locked is false); 0 otherwise
 */
static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
					uint64_t len, u8 sr, bool locked)
{
	loff_t lock_offs;
	uint64_t lock_len;

	spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);

	if (locked)
		/* Requested range is a sub-range of locked range */
		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);

	/* Requested range does not overlap with locked range */
	return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
}

static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				u8 sr)
{
	return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
}

static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				  u8 sr)
{
	return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
}

/*
 * Lock a region of the flash. Compatible with ST Micro and similar flash.
 * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
 * register (SR). Does not support these features found in newer SR bitfields:
 *   - SEC: sector/block protect - only handle SEC=0 (block protect)
 *   - CMP: complement protect - only support CMP=0 (range is not complemented)
 *
 * Support for the following is provided conditionally for some flash:
 *   - TB: top/bottom protect
 *
 * Sample table portion for 8MB flash (Winbond w25q64fw):
 *
 *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
 *  --------------------------------------------------------------------------
 *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
 *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
 *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
 *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
 *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
 *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
 *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
 *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
 *  ------|-------|-------|-------|-------|---------------|-------------------
 *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
 *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
 *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
 *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
 *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
 *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
 *
 * Returns negative on errors, 0 on success.
 */
static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	int ret, status_old, status_new;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	status_old = nor->bouncebuf[0];

	/* If nothing in our range is unlocked, we don't need to do anything */
	if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection */
	if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection */
	if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
				  status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	if (lock_len == mtd->size) {
		val = mask;
	} else {
		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
		val = pow << SR_BP_SHIFT;

		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
			val = (val & ~SR_BP3) | SR_BP3_BIT6;

		/* Don't "lock" with no region! */
		if (!(val & mask))
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Disallow further writes if WP pin is asserted */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}

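/*
 * Illustrative example (added for clarity): on the 8 MiB part from the table
 * above, a request to lock the upper 4 MiB (ofs = 4 MiB, len = 4 MiB) picks
 * top protection with lock_len = 4 MiB, which encodes to BP2:BP0 = 110b,
 * i.e. the "Upper 1/2" row of the table.
 */
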
/*
 * Unlock a region of the flash. See spi_nor_sr_lock() for more info.
 *
 * Returns negative on errors, 0 on success.
 */
static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	int ret, status_old, status_new;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	status_old = nor->bouncebuf[0];

	/* If nothing in our range is locked, we don't need to do anything */
	if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection */
	if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection */
	if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				    status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should remain locked */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	if (lock_len == 0) {
		val = 0; /* fully unlocked */
	} else {
		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
		val = pow << SR_BP_SHIFT;

		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
			val = (val & ~SR_BP3) | SR_BP3_BIT6;

		/* Some power-of-two sizes are not supported */
		if (val & ~mask)
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Don't protect status register if we're fully unlocked */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}

/*
 * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
 * for more info.
 *
 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
 * negative on errors.
 */
static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
}

static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
	.lock = spi_nor_sr_lock,
	.unlock = spi_nor_sr_unlock,
	.is_locked = spi_nor_sr_is_locked,
};

static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	ret = nor->params->locking_ops->lock(nor, ofs, len);

	spi_nor_unlock_and_unprep(nor);
	return ret;
}

static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	ret = nor->params->locking_ops->unlock(nor, ofs, len);

	spi_nor_unlock_and_unprep(nor);
	return ret;
}

static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	ret = nor->params->locking_ops->is_locked(nor, ofs, len);

	spi_nor_unlock_and_unprep(nor);
	return ret;
}

/**
 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
 * Register 1.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
		return 0;

	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;

	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
 * Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *
 * This is one of the procedures to set the QE bit described in the SFDP
 * (JESD216 rev B) specification but no manufacturer using this procedure has
 * been identified yet, hence the name of the function.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}

static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_gigadevice,
};

static const struct flash_info *
spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
			  const u8 *id)
{
	unsigned int i;

	for (i = 0; i < nparts; i++) {
		if (parts[i].id_len &&
		    !memcmp(parts[i].id, id, parts[i].id_len))
			return &parts[i];
	}

	return NULL;
}

static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	unsigned int i;
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		info = spi_nor_search_part_by_id(manufacturers[i]->parts,
						 manufacturers[i]->nparts,
						 id);
		if (info) {
			nor->manufacturer = manufacturers[i];
			return info;
		}
	}

	dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
		SPI_NOR_MAX_ID_LEN, id);
	return ERR_PTR(-ENODEV);
}

static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}

/*
 * Write an address range to the nor chip. Data must be written in
 * FLASH_PAGESIZE chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 * Power of two numbers have only one bit set and we can use
		 * the instruction hweight32 to detect if we need to do a
		 * modulus (do_div()) or not.
		 */
		if (hweight32(nor->page_size) == 1) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			page_offset = do_div(aux, nor->page_size);
		}
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor);

	return ret;
}

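/*
 * Illustrative example (added for clarity): with a 256-byte page, a write
 * starting at address 0x1F0 has page_offset = 0x1F0 & 0xFF = 0xF0, so only
 * page_remain = 256 - 0xF0 = 16 bytes are programmed in the first chunk and
 * the loop continues at the next page boundary, 0x200.
 */
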
static int spi_nor_check(struct spi_nor *nor)
{
	if (!nor->dev ||
	    (!nor->spimem && !nor->controller_ops) ||
	    (!nor->spimem && nor->controller_ops &&
	    (!nor->controller_ops->read ||
	     !nor->controller_ops->write ||
	     !nor->controller_ops->read_reg ||
	     !nor->controller_ops->write_reg))) {
		pr_err("spi-nor: please fill all the necessary fields!\n");
		return -EINVAL;
	}

	if (nor->spimem && nor->controller_ops) {
		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
		return -EINVAL;
	}

	return 0;
}

void spi_nor_set_read_settings(struct spi_nor_read_command *read,
			       u8 num_mode_clocks,
			       u8 num_wait_states,
			       u8 opcode,
			       enum spi_nor_protocol proto)
{
	read->num_mode_clocks = num_mode_clocks;
	read->num_wait_states = num_wait_states;
	read->opcode = opcode;
	read->proto = proto;
}

void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->opcode = opcode;
	pp->proto = proto;
}

static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == (int)hwcaps)
			return table[i][1];

	return -EINVAL;
}

int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}

static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}

/**
 * spi_nor_spimem_check_op - check if the operation is supported
 *                           by controller
 *@nor:	pointer to a 'struct spi_nor'
 *@op:	pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	/*
	 * First test with 4 address bytes. The opcode itself might
	 * be a 3B addressing opcode but we don't care, because
	 * SPI controller implementation should not check the opcode,
	 * but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		if (nor->mtd.size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}


/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 * @nor:	pointer to a 'struct spi_nor'
 * @read:	pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_DUMMY(1, 0),
					  SPI_MEM_OP_DATA_IN(1, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}
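
/*
 * Worked example (illustrative): with nor->read_dummy = 8 dummy clock cycles
 * and a x4 dummy bus width, the template above carries 8 * 4 / 8 = 4 dummy
 * bytes; in a DTR protocol two bytes are transferred per clock cycle, so the
 * same eight cycles become 8 bytes.
 */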

/**
 * spi_nor_spimem_check_pp - check if the page program op is supported
 * @nor:	pointer to a 'struct spi_nor'
 * @pp:		pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
					  SPI_MEM_OP_ADDR(3, 0, 0),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(1, NULL, 0));

	spi_nor_spimem_setup_op(nor, &op, pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}

/**
 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
 *                                based on SPI controller capabilities
 * @nor:	pointer to a 'struct spi_nor'
 * @hwcaps:	pointer to resulting capabilities after adjusting
 *		according to controller and flash's capability
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	unsigned int cap;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * If the reset line is broken, we do not want to enter a stateful
	 * mode.
	 */
	if (nor->flags & SNOR_F_BROKEN_RESET)
		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}
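
/*
 * Illustrative sketch of the loop above: if the flash advertises
 * SNOR_HWCAPS_READ_1_1_4 but spi_nor_spimem_check_readop() reports that the
 * controller cannot execute the corresponding 1-1-4 read template, that
 * BIT() is simply cleared from *hwcaps and spi_nor_select_read() later falls
 * back to the best remaining capability (e.g. SNOR_HWCAPS_READ_FAST).
 */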

/**
 * spi_nor_set_erase_type() - set a SPI NOR erase type
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @size:	the size of the sector/block erased by the erase type
 * @opcode:	the SPI command op code to erase the sector/block
 */
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
			    u8 opcode)
{
	erase->size = size;
	erase->opcode = opcode;
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	erase->size_shift = ffs(erase->size) - 1;
	erase->size_mask = (1 << erase->size_shift) - 1;
}
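
/*
 * Worked example (illustrative): for a 4 KiB erase type, size = 4096 so
 * size_shift = ffs(4096) - 1 = 12 and size_mask = (1 << 12) - 1 = 0xfff,
 * which lets callers test alignment with "addr & erase->size_mask" instead
 * of a division.
 */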

/**
 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
 * @map:	the erase map of the SPI NOR
 * @erase_mask:	bitmask encoding erase types that can erase the entire
 *		flash memory
 * @flash_size:	the spi nor flash memory size
 */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
				    u8 erase_mask, u64 flash_size)
{
	/* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
	map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
				     SNOR_LAST_REGION;
	map->uniform_region.size = flash_size;
	map->regions = &map->uniform_region;
	map->uniform_erase_type = erase_mask;
}

int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
			     const struct sfdp_parameter_header *bfpt_header,
			     const struct sfdp_bfpt *bfpt,
			     struct spi_nor_flash_parameter *params)
{
	int ret;

	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->post_bfpt) {
		ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
							   bfpt, params);
		if (ret)
			return ret;
	}

	if (nor->info->fixups && nor->info->fixups->post_bfpt)
		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
						    params);

	return 0;
}

static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * In the SPI NOR framework, we don't need to make the difference
	 * between mode clock cycles and wait state clock cycles.
	 * Indeed, the value of the mode clock cycles is used by a QSPI
	 * flash memory to know whether it should enter or leave its 0-4-4
	 * (Continuous Read / XIP) mode.
	 * eXecution In Place is out of the scope of the mtd sub-system.
	 * Hence we choose to merge both mode and wait state clock cycles
	 * into the so called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}

static int spi_nor_select_pp(struct spi_nor *nor,
			     u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	const struct spi_nor_pp_command *pp;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &nor->params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;
	return 0;
}

/**
 * spi_nor_select_uniform_erase() - select optimum uniform erase type
 * @map:		the erase map of the SPI NOR
 * @wanted_size:	the erase type size to search for. Contains the value of
 *			info->sector_size or of the "small sector" size in case
 *			CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
 *
 * Once the optimum uniform sector erase command is found, disable all the
 * other uniform sector erase commands.
 *
 * Return: pointer to erase type on success, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
			     const u32 wanted_size)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_erase_type;

	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/*
		 * If the current erase size is the one, stop here:
		 * we have found the right uniform Sector Erase command.
		 */
		if (tested_erase->size == wanted_size) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise, the current erase size is still a valid candidate.
		 * Select the biggest valid candidate.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;
			/* keep iterating to find the wanted_size */
	}

	if (!erase)
		return NULL;

	/* Disable all other Sector Erase commands. */
	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
	map->uniform_erase_type |= BIT(erase - map->erase_type);
	return erase;
}

static int spi_nor_select_erase(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	u32 wanted_size = nor->info->sector_size;
	int i;

	/*
	 * The previous implementation handling Sector Erase commands assumed
	 * that the SPI flash memory has a uniform layout and then used only
	 * one of the supported erase sizes for all Sector Erase commands.
	 * So to be backward compatible, the new implementation also tries to
	 * manage the SPI flash memory as uniform with a single erase sector
	 * size, when possible.
	 */
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	wanted_size = 4096u;
#endif

	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map, wanted_size);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * For non-uniform SPI flash memory, set mtd->erasesize to the
	 * maximum erase sector size. No need to set nor->erase_opcode.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}

static int spi_nor_default_setup(struct spi_nor *nor,
				 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/*
		 * When called from spi_nor_probe(), all caps are set and we
		 * need to discard some of them based on what the SPI
		 * controller actually supports (using spi_mem_supports_op()).
		 */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * SPI n-n-n protocols are not supported when the SPI
		 * controller directly implements the spi_nor interface.
		 * Yet another reason to switch to spi-mem.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return 0;
}

static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_hwcaps *hwcaps)
{
	if (!nor->params->setup)
		return 0;

	return nor->params->setup(nor, hwcaps);
}

/**
 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
 * settings based on MFR register and ->default_init() hook.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->default_init)
		nor->manufacturer->fixups->default_init(nor);

	if (nor->info->fixups && nor->info->fixups->default_init)
		nor->info->fixups->default_init(nor);
}

/**
 * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
 * based on JESD216 SFDP standard.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
 * legacy flash parameters and settings will be restored.
 */
static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor, nor->params)) {
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
		nor->addr_width = 0;
		nor->flags &= ~SNOR_F_4B_OPCODES;
	}
}

/**
 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
 * based on nor->info data.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Initialize legacy flash parameters and settings. */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
	params->setup = spi_nor_default_setup;
	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
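
/*
 * Illustrative example: a flash_info entry carrying SECT_4K and
 * SPI_NOR_QUAD_READ ends up here with SNOR_HWCAPS_READ_1_1_4 set in
 * params->hwcaps.mask, a 1-1-4 read opcode with 8 dummy cycles in
 * params->reads[], and a uniform erase map advertising both the 4 KiB and
 * the sector-size erase types.
 */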

/**
 * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
 * after SFDP has been parsed (is also called for SPI NORs that do not
 * support SFDP).
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Typically used to tweak various parameters that could not be extracted by
 * other means (i.e. when information provided by the SFDP/flash_info tables
 * are incomplete or wrong).
 */
static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->post_sfdp)
		nor->manufacturer->fixups->post_sfdp(nor);

	if (nor->info->fixups && nor->info->fixups->post_sfdp)
		nor->info->fixups->post_sfdp(nor);
}

/**
 * spi_nor_late_init_params() - Late initialization of default flash parameters.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Used to set default flash parameters and settings when the ->default_init()
 * hook or the SFDP parser leave voids.
 */
static void spi_nor_late_init_params(struct spi_nor *nor)
{
	/*
	 * NOR protection support. When locking_ops are not provided, we pick
	 * the default ones.
	 */
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		nor->params->locking_ops = &spi_nor_sr_locking_ops;
}

/**
 * spi_nor_init_params() - Initialize the flash's parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The flash parameters and settings are initialized based on a sequence of
 * calls that are ordered by priority:
 *
 * 1/ Default flash parameters initialization. The initializations are done
 *    based on nor->info data:
 *		spi_nor_info_init_params()
 *
 * which can be overwritten by:
 * 2/ Manufacturer flash parameters initialization. The initializations are
 *    done based on MFR register, or when the decisions can not be done solely
 *    based on MFR, by using specific flash_info tweaks, ->default_init():
 *		spi_nor_manufacturer_init_params()
 *
 * which can be overwritten by:
 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
 *    should be more accurate than the above.
 *		spi_nor_sfdp_init_params()
 *
 *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
 *    the flash parameters and settings immediately after parsing the Basic
 *    Flash Parameter Table.
 *
 * which can be overwritten by:
 * 4/ Post SFDP flash parameters initialization. Used to tweak various
 *    parameters that could not be extracted by other means (i.e. when
 *    information provided by the SFDP/flash_info tables are incomplete or
 *    wrong).
 *		spi_nor_post_sfdp_fixups()
 *
 * 5/ Late default flash parameters initialization, used when the
 *    ->default_init() hook or the SFDP parser do not set specific params.
 *		spi_nor_late_init_params()
 */
static int spi_nor_init_params(struct spi_nor *nor)
{
	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
	if (!nor->params)
		return -ENOMEM;

	spi_nor_info_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
				 SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
		spi_nor_sfdp_init_params(nor);

	spi_nor_post_sfdp_fixups(nor);

	spi_nor_late_init_params(nor);

	return 0;
}

/**
 * spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	whether to enable or disable Octal DTR
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
{
	int ret;

	if (!nor->params->octal_dtr_enable)
		return 0;

	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
		return 0;

	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->octal_dtr_enable(nor, enable);
	if (ret)
		return ret;

	if (enable)
		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
	else
		nor->reg_proto = SNOR_PROTO_1_1_1;

	return 0;
}

/**
 * spi_nor_quad_enable() - enable Quad I/O if needed.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_quad_enable(struct spi_nor *nor)
{
	if (!nor->params->quad_enable)
		return 0;

	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
	      spi_nor_get_protocol_width(nor->write_proto) == 4))
		return 0;

	return nor->params->quad_enable(nor);
}

/**
 * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Some SPI NOR flashes are write protected by default after a power-on reset
 * cycle, in order to avoid inadvertent writes during power-up. Backward
 * compatibility imposes to unlock the entire flash memory array at power-up
 * by default.
 *
 * Unprotecting the entire flash array will fail for boards which are hardware
 * write-protected. Thus any errors are ignored.
 */
static void spi_nor_try_unlock_all(struct spi_nor *nor)
{
	int ret;

	if (!(nor->flags & SNOR_F_HAS_LOCK))
		return;

	dev_dbg(nor->dev, "Unprotecting entire flash array\n");

	ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size);
	if (ret)
		dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
}

static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_octal_dtr_enable(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a power-on
	 * reset cycle, in order to avoid inadvertent writes during power-up.
	 * Backward compatibility imposes to unlock the entire flash memory
	 * array at power-up by default. Depending on the kernel configuration
	 * (1) do nothing, (2) always unlock the entire flash array or (3)
	 * unlock the entire flash array only when the software write
	 * protection bits are volatile. The latter is indicated by
	 * SNOR_F_SWP_IS_VOLATILE.
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	if (nor->addr_width == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		nor->params->set_4byte_addr_mode(nor, true);
	}

	return 0;
}

static void spi_nor_soft_reset(struct spi_nor *nor)
{
	struct spi_mem_op op;
	int ret;

	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
			SPI_MEM_OP_NO_DUMMY,
			SPI_MEM_OP_NO_ADDR,
			SPI_MEM_OP_NO_DATA);

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
			SPI_MEM_OP_NO_DUMMY,
			SPI_MEM_OP_NO_ADDR,
			SPI_MEM_OP_NO_DATA);

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/*
	 * Software Reset is not instant, and the delay varies from flash to
	 * flash. Looking at a few flashes, most range somewhere below 100
	 * microseconds. So, sleep for a range of 200-400 us.
	 */
	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}

/* mtd suspend handler */
static int spi_nor_suspend(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	/* Disable octal DTR mode if we enabled it. */
	ret = spi_nor_octal_dtr_enable(nor, false);
	if (ret)
		dev_err(nor->dev, "suspend() failed\n");

	return ret;
}

/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	struct device *dev = nor->dev;
	int ret;

	/* re-initialize the nor chip */
	ret = spi_nor_init(nor);
	if (ret)
		dev_err(dev, "resume() failed\n");
}

void spi_nor_restore(struct spi_nor *nor)
{
	/* restore the addressing mode */
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET)
		nor->params->set_4byte_addr_mode(nor, false);

	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);

static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
						 const char *name)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			if (!strcmp(name, manufacturers[i]->parts[j].name)) {
				nor->manufacturer = manufacturers[i];
				return &manufacturers[i]->parts[j];
			}
		}
	}

	return NULL;
}

static int spi_nor_set_addr_width(struct spi_nor *nor)
{
	if (nor->addr_width) {
		/* already configured from SFDP */
	} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
		/*
		 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
		 * in this protocol an odd address width cannot be used because
		 * then the address phase would only span a cycle and a half.
		 * Half a cycle would be left over. We would then have to start
		 * the dummy phase in the middle of a cycle and so too the data
		 * phase, and we will end the transaction with half a cycle left
		 * over.
		 *
		 * Force all 8D-8D-8D flashes to use an address width of 4 to
		 * avoid this situation.
		 */
		nor->addr_width = 4;
	} else if (nor->info->addr_width) {
		nor->addr_width = nor->info->addr_width;
	} else {
		nor->addr_width = 3;
	}

	if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_width = 4;
	}

	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_dbg(nor->dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	/* Set 4byte opcodes when possible. */
	if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
	    !(nor->flags & SNOR_F_HAS_4BAIT))
		spi_nor_set_4byte_opcodes(nor);

	return 0;
}

static void spi_nor_debugfs_init(struct spi_nor *nor,
				 const struct flash_info *info)
{
	struct mtd_info *mtd = &nor->mtd;

	mtd->dbg.partname = info->name;
	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
					 info->id_len, info->id);
}

static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(nor, name);
	/* Try to auto-detect if chip name wasn't specified or not found */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}

int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Make sure the XSR_RDY flag is set before calling
	 * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
	 * with Atmel SPI NOR.
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	mtd->_write = spi_nor_write;

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = nor->params->writesize;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = nor->params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;

	if (nor->params->locking_ops) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (info->flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;
	if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (info->flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (info->flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = nor->params->page_size;
	mtd->writebufsize = nor->page_size;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;

	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
		 (long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
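
/*
 * Minimal usage sketch (illustrative, assuming a controller driver that
 * fills a 'struct spi_nor' itself rather than going through spi_nor_probe()):
 *
 *	const struct spi_nor_hwcaps hwcaps = {
 *		.mask = SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST |
 *			SNOR_HWCAPS_PP,
 *	};
 *	int ret = spi_nor_scan(nor, NULL, &hwcaps);
 *	if (!ret)
 *		ret = mtd_device_register(&nor->mtd, NULL, 0);
 */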

static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}

static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}

static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->page_size;
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}

static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}

static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}

/*
 * Do NOT add to this array without reading the following:
 *
 * Historically, many flash devices are bound to this driver by their name. But
 * since most of these flash are compatible to some extent, and their
 * differences can often be differentiated by the JEDEC read-ID command, we
 * encourage new users to add support to the spi-nor library, and simply bind
 * against a generic string here (e.g., "jedec,spi-nor").
 *
 * Many flash names are kept here in this list (as well as in spi-nor.c) to
 * keep them available as module aliases for existing platforms.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);

static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);

/*
 * REVISIT: many of these chips have deep power-down modes, which
 * should clearly be entered on suspend() to minimize power use.
 * And also when they're otherwise idle...
 */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};
module_spi_mem_driver(spi_nor_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");