// SPDX-License-Identifier: GPL-2.0
/*
 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
 *
 * Copyright (C) 2005, Intec Automation Inc.
 * Copyright (C) 2014, Freescale Semiconductor, Inc.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spi/flash.h>

#include "core.h"
/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now
 */
#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)

/*
 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
 * for larger flash
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)

#define SPI_NOR_MAX_ADDR_NBYTES	4

#define SPI_NOR_SRST_SLEEP_MIN 200
#define SPI_NOR_SRST_SLEEP_MAX 400
/**
 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
 *			   extension type.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 *
 * Right now, only "repeat" and "invert" are supported.
 *
 * Return: The opcode extension.
 */
static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
			      const struct spi_mem_op *op)
{
	switch (nor->cmd_ext_type) {
	case SPI_NOR_EXT_INVERT:
		return ~op->cmd.opcode;

	case SPI_NOR_EXT_REPEAT:
		return op->cmd.opcode;

	default:
		dev_err(nor->dev, "Unknown command extension type\n");
		return 0;
	}
}
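
/*
 * Illustrative note (editorial, not from the original source): with the INVERT
 * extension a Read Status opcode of 0x05 is followed by ~0x05 = 0xfa, so the
 * 2-byte DTR command phase carries 0x05fa; with REPEAT it carries 0x0505.
 */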
/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 * @proto:	the protocol from which the properties need to be set.
 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * SPIMEM supports mixed DTR modes, but right now we can only
		 * have all phases either DTR or STR. IOW, SPIMEM can have
		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
		 * phases to either DTR or STR.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}
}
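
/*
 * Usage sketch (illustrative; mirrors the register helpers below): callers
 * fill a spi-mem op template and let this helper patch in the bus widths,
 * DTR flags and extended opcode for the current protocol, e.g.:
 *
 *	struct spi_mem_op op = SPI_NOR_WREN_OP;
 *
 *	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
 *	ret = spi_mem_exec_op(nor->spimem, &op);
 */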
/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *			     transfer
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * If we have to use the bounce buffer, the data field in @op will be updated.
 *
 * Return: true if the bounce buffer is needed, false if not
 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
	/* op->data.buf.in occupies the same memory as op->data.buf.out */
	if (object_is_on_stack(op->data.buf.in) ||
	    !virt_addr_valid(op->data.buf.in)) {
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;
		op->data.buf.in = nor->bouncebuf;
		return true;
	}

	return false;
}
/**
 * spi_nor_spimem_exec_op() - execute a memory operation
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * Return: 0 on success, -error otherwise.
 */
static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
{
	int error;

	error = spi_mem_adjust_op_size(nor->spimem, op);
	if (error)
		return error;

	return spi_mem_exec_op(nor->spimem, op);
}
int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
				    u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}

int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
				     const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->write_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}
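
/*
 * Note (editorial): the legacy ->read_reg()/->write_reg()/->erase() controller
 * hooks are assumed not to understand 2-byte DTR opcodes, hence the wrappers
 * above bail out with -EOPNOTSUPP whenever the register protocol is DTR.
 */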
/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *				spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}
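
/*
 * Worked example (illustrative): with nor->read_dummy = 8 cycles and a 4-bit
 * wide dummy phase, op.dummy.nbytes = 8 * 4 / 8 = 4 bytes; in DTR mode the
 * same 8 cycles clock twice as much data, hence the extra *= 2 above.
 */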
/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_read_data(nor, from, len, buf);

	return nor->controller_ops->read(nor, from, len, buf);
}
/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *				 spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}
/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_write_data(nor, to, len, buf);

	return nor->controller_ops->write(nor, to, len, buf);
}
/**
 * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
 *			    volatile.
 * @nor:	pointer to 'struct spi_nor'.
 * @op:		SPI memory operation. op->data.buf must be DMA-able.
 * @proto:	SPI protocol to use for the register operation.
 *
 * Return: zero on success, -errno otherwise
 */
int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
			 enum spi_nor_protocol proto)
{
	if (!nor->spimem)
		return -EOPNOTSUPP;

	spi_nor_spimem_setup_op(nor, op, proto);
	return spi_nor_spimem_exec_op(nor, op);
}
/**
 * spi_nor_write_any_volatile_reg() - write any volatile register to flash
 *				      memory.
 * @nor:	pointer to 'struct spi_nor'
 * @op:		SPI memory operation. op->data.buf must be DMA-able.
 * @proto:	SPI protocol to use for the register operation.
 *
 * Writing volatile registers is instant according to some manufacturers
 * (Cypress, Micron) and does not need any status polling.
 *
 * Return: zero on success, -errno otherwise
 */
int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
				   enum spi_nor_protocol proto)
{
	int ret;

	if (!nor->spimem)
		return -EOPNOTSUPP;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	spi_nor_spimem_setup_op(nor, op, proto);
	return spi_nor_spimem_exec_op(nor, op);
}
/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WREN_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

	return ret;
}
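
/*
 * Usage sketch (illustrative): register writes and erases in this file follow
 * the same sequence built on this helper:
 *
 *	ret = spi_nor_write_enable(nor);
 *	if (ret)
 *		return ret;
 *	... issue the WRSR/erase operation ...
 *	return spi_nor_wait_till_ready(nor);
 */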
/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_disable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRDI_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

	return ret;
}
/**
 * spi_nor_read_id() - Read the JEDEC ID.
 * @nor:	pointer to 'struct spi_nor'.
 * @naddr:	number of address bytes to send. Can be zero if the operation
 *		does not need to send an address.
 * @ndummy:	number of dummy bytes to send after an opcode or address. Can
 *		be zero if the operation does not require dummy bytes.
 * @id:		pointer to a DMA-able buffer where the value of the JEDEC ID
 *		will be written.
 * @proto:	the SPI protocol for register operation.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
		    enum spi_nor_protocol proto)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);

		spi_nor_spimem_setup_op(nor, &op, proto);
		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	return ret;
}
/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}
/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *		Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}
/**
 * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
 *			using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
 *			Winbond and Macronix.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ? SPINOR_OP_EN4B :
								SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}
/**
 * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
 * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
 * by ST and Micron flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
	if (ret)
		return ret;

	return spi_nor_write_disable(nor);
}
/**
 * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
 *			SPINOR_OP_BRWR. Typically used by Spansion flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is
 * used to enable/disable 4-byte address mode. When MSB is set to '1', 4-byte
 * address mode is active and A[30:24] bits are don't care. Write instruction is
 * SPINOR_OP_BRWR(17h) with 1 byte of data.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable)
{
	int ret;

	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}
/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 *			for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	return !(nor->bouncebuf[0] & SR_WIP);
}
/**
 * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: true if parallel locking is enabled, false otherwise.
 */
static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
{
	return nor->flags & SNOR_F_RWW;
}
/* Locking helpers for status read operations */
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;
	int ret = -EAGAIN;

	mutex_lock(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd)
		goto busy;

	rww->ongoing_io = true;
	rww->ongoing_rd = true;
	ret = 0;

busy:
	mutex_unlock(&nor->lock);
	return ret;
}

static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	mutex_lock(&nor->lock);

	rww->ongoing_io = false;
	rww->ongoing_rd = false;

	mutex_unlock(&nor->lock);
}

static int spi_nor_lock_rdst(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor))
		return spi_nor_rww_start_rdst(nor);

	return 0;
}

static void spi_nor_unlock_rdst(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_rdst(nor);
		wake_up(&nor->rww.wait);
	}
}
/**
 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_lock_rdst(nor);
	if (ret)
		return 0;

	/* Flashes might override the standard routine. */
	if (nor->params->ready)
		ret = nor->params->ready(nor);
	else
		ret = spi_nor_sr_ready(nor);

	spi_nor_unlock_rdst(nor);

	return ret;
}
/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}
/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}
/**
 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_global_block_unlock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_GBULK_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
						       NULL, 0);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}
/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to DMA-able buffer to write to the Status Register.
 * @len:	number of bytes to write to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
						       len);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}
/**
 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
 * ensure that the byte written matches the received value.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;

	nor->bouncebuf[0] = sr1;

	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
	if (ret)
		return ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] != sr1) {
		dev_dbg(nor->dev, "SR1: read back test failed\n");
		return -EIO;
	}

	return 0;
}
/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 matches the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Make sure we don't overwrite the contents of Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
		   spi_nor_get_protocol_width(nor->write_proto) == 4 &&
		   nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * When the Quad Enable method is set and the buswidth is 4, we
		 * can safely assume that the value of the QE bit is one, as a
		 * consequence of the nor->params->quad_enable() call.
		 *
		 * According to the JESD216 revB standard, BFPT DWORDS[15],
		 * bits 22:20, the 16-bit Write Status (01h) command is
		 * available just for the cases in which the QE bit is
		 * described in SR2 at BIT(1).
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr1 != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register matches the received value, and that the 16-bit Write
 * did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}
/**
 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
 * the byte written matches the received value without affecting other bits in
 * the Status Register 1 and 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	if (nor->flags & SNOR_F_HAS_16BIT_SR)
		return spi_nor_write_16bit_sr_and_check(nor, sr1);

	return spi_nor_write_sr1_and_check(nor, sr1);
}
/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
						       sr2, 1);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}
/**
 * spi_nor_read_sr2() - Read the Status Register 2 using the
 * SPINOR_OP_RDSR2 (3fh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer where the value of the
 *		Status Register 2 will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR2\n", ret);

	return ret;
}
/**
 * spi_nor_erase_die() - Erase the entire die.
 * @nor:	pointer to 'struct spi_nor'.
 * @addr:	address of the die.
 * @die_size:	size of the die.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size)
{
	bool multi_die = nor->mtd.size != die_size;
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode,
					     nor->addr_nbytes, addr, multi_die);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		if (multi_die)
			return -EOPNOTSUPP;

		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == opcode)
			return table[i][1];

	/* No conversion found, keep input op code. */
	return opcode;
}
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
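
/*
 * Illustrative example (editorial): a flash probed with SPINOR_OP_SE (0xd8)
 * and SPINOR_OP_READ_FAST (0x0b) ends up using SPINOR_OP_SE_4B (0xdc) and
 * SPINOR_OP_READ_FAST_4B (0x0c) once 4-byte opcodes are selected; opcodes
 * absent from the tables are left untouched by spi_nor_convert_opcode().
 */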
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
	return !!nor->params->erase_map.uniform_region.erase_mask;
}
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}
static int spi_nor_prep(struct spi_nor *nor)
{
	int ret = 0;

	if (nor->controller_ops && nor->controller_ops->prepare)
		ret = nor->controller_ops->prepare(nor);

	return ret;
}

static void spi_nor_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
}
static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
				    u8 *first, u8 *last)
{
	/* This is currently safe, the number of banks being very small */
	*first = DIV_ROUND_DOWN_ULL(start, bank_size);
	*last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
}
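
/*
 * Worked example (illustrative): with a 16 MiB bank_size, an operation at
 * start = 0x00ff0000 and len = 0x20000 spans two banks, so *first = 0 and
 * *last = 1.
 */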
/* Generic helpers for internal locking and serialization */
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;
	bool start = false;

	mutex_lock(&nor->lock);

	if (rww->ongoing_io)
		goto busy;

	rww->ongoing_io = true;
	start = true;

busy:
	mutex_unlock(&nor->lock);
	return start;
}

static void spi_nor_rww_end_io(struct spi_nor *nor)
{
	mutex_lock(&nor->lock);
	nor->rww.ongoing_io = false;
	mutex_unlock(&nor->lock);
}

static int spi_nor_lock_device(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
}

static void spi_nor_unlock_device(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_io(nor);
		wake_up(&nor->rww.wait);
	}
}
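
/*
 * Usage sketch (illustrative; this is the pattern used by the erase and write
 * paths further down in this file):
 *
 *	ret = spi_nor_lock_device(nor);
 *	if (ret)
 *		return ret;
 *	ret = spi_nor_write_enable(nor);
 *	... issue the operation ...
 *	spi_nor_unlock_device(nor);
 *	ret = spi_nor_wait_till_ready(nor);
 */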
/* Generic helpers for internal locking and serialization */
static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;
	bool start = false;

	mutex_lock(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
		goto busy;

	rww->ongoing_io = true;
	rww->ongoing_rd = true;
	rww->ongoing_pe = true;
	start = true;

busy:
	mutex_unlock(&nor->lock);
	return start;
}

static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	mutex_lock(&nor->lock);
	rww->ongoing_io = false;
	rww->ongoing_rd = false;
	rww->ongoing_pe = false;
	mutex_unlock(&nor->lock);
}
int spi_nor_prep_and_lock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_exclusive(nor));

	return ret;
}

void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor)) {
		mutex_unlock(&nor->lock);
	} else {
		spi_nor_rww_end_exclusive(nor);
		wake_up(&nor->rww.wait);
	}

	spi_nor_unprep(nor);
}
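
/*
 * Note (editorial): spi_nor_prep_and_lock()/spi_nor_unlock_and_unprep() give
 * the caller exclusive access to the whole device. When SNOR_F_RWW is set they
 * claim the ongoing_io/rd/pe states rather than holding nor->lock for the
 * whole operation, so per-bank readers and program/erase operations drain
 * before exclusive access is granted.
 */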
/* Internal locking helpers for program and erase operations */
static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	bool started = false;
	u8 first, last;
	int bank;

	mutex_lock(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
		goto busy;

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++) {
		if (rww->used_banks & BIT(bank))
			goto busy;

		used_banks |= BIT(bank);
	}

	rww->used_banks |= used_banks;
	rww->ongoing_pe = true;
	started = true;

busy:
	mutex_unlock(&nor->lock);
	return started;
}
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	u8 first, last;
	int bank;

	mutex_lock(&nor->lock);

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++)
		rww->used_banks &= ~BIT(bank);

	rww->ongoing_pe = false;

	mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_pe(nor, start, len));

	return ret;
}

static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	if (!spi_nor_use_parallel_locking(nor)) {
		mutex_unlock(&nor->lock);
	} else {
		spi_nor_rww_end_pe(nor, start, len);
		wake_up(&nor->rww.wait);
	}

	spi_nor_unprep(nor);
}
/* Internal locking helpers for read operations */
static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	bool started = false;
	u8 first, last;
	int bank;

	mutex_lock(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd)
		goto busy;

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++) {
		if (rww->used_banks & BIT(bank))
			goto busy;

		used_banks |= BIT(bank);
	}

	rww->used_banks |= used_banks;
	rww->ongoing_io = true;
	rww->ongoing_rd = true;
	started = true;

busy:
	mutex_unlock(&nor->lock);
	return started;
}
static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	u8 first, last;
	int bank;

	mutex_lock(&nor->lock);

	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++)
		nor->rww.used_banks &= ~BIT(bank);

	rww->ongoing_io = false;
	rww->ongoing_rd = false;

	mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_rd(nor, start, len));

	return ret;
}

static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	if (!spi_nor_use_parallel_locking(nor)) {
		mutex_unlock(&nor->lock);
	} else {
		spi_nor_rww_end_rd(nor, start, len);
		wake_up(&nor->rww.wait);
	}

	spi_nor_unprep(nor);
}
/*
 * Initiate the erasure of a single sector
 */
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
						nor->addr_nbytes, addr);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control
	 */
	for (i = nor->addr_nbytes - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_nbytes);
}
/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}
/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase. The region in
 *				    which the address fits is expected to be
 *				    provided.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(region->erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];
		if (!erase->size)
			continue;

		/* Alignment is not mandatory for overlaid regions */
		if (region->overlaid && region->size <= len)
			return erase;

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}
/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	if (region->overlaid)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}
/**
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list:	list of erase commands
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}
/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	unsigned int i;
	int ret = -EINVAL;

	for (i = 0; i < map->n_regions && len; i++) {
		region = &map->regions[i];
		region_end = region->offset + region->size;

		while (len && addr >= region->offset && addr < region_end) {
			erase = spi_nor_find_best_erase_type(map, region, addr,
							     len);
			if (!erase)
				goto destroy_erase_cmd_list;

			if (prev_erase != erase || erase->size != cmd->size ||
			    region->overlaid) {
				cmd = spi_nor_init_erase_cmd(region, erase);
				if (IS_ERR(cmd)) {
					ret = PTR_ERR(cmd);
					goto destroy_erase_cmd_list;
				}

				list_add_tail(&cmd->list, erase_list);
			} else {
				cmd->count++;
			}

			len -= cmd->size;
			addr += cmd->size;
			prev_erase = erase;
		}
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
				 cmd->size, cmd->opcode, cmd->count);

			ret = spi_nor_lock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto destroy_erase_cmd_list;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);

	return ret;
}
static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
			      size_t len, size_t die_size)
{
	unsigned long timeout;
	int ret;

	/*
	 * Scale the timeout linearly with the size of the flash, with
	 * a minimum calibrated to an old 2MB flash. We could try to
	 * pull these from CFI/SFDP, but these values should be good
	 * enough for now.
	 */
	timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
		      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
		      (unsigned long)(nor->mtd.size / SZ_2M));

	do {
		ret = spi_nor_lock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			return ret;
		}

		ret = spi_nor_erase_die(nor, addr, die_size);

		spi_nor_unlock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			return ret;

		addr += die_size;
		len -= die_size;

	} while (len);

	return 0;
}
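
/*
 * Worked example (illustrative): for a 32 MiB die, the timeout above scales to
 * CHIP_ERASE_2MB_READY_WAIT_JIFFIES * (32 MiB / SZ_2M) = 40 * HZ * 16, i.e.
 * roughly 640 seconds per die erase.
 */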
/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u8 n_dice = nor->params->n_dice;
	bool multi_die_erase = false;
	loff_t addr, len;
	size_t die_size;
	uint32_t rem;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	if (n_dice) {
		die_size = div_u64(mtd->size, n_dice);
		if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
			multi_die_erase = true;
	} else {
		die_size = mtd->size;
	}

	ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
	if (ret)
		return ret;

	/* chip (die) erase? */
	if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
	    multi_die_erase) {
		ret = spi_nor_erase_dice(nor, addr, len, die_size);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_lock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto erase_err;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len);

	return ret;
}
/**
 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
 * Register 1.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
		return 0;

	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;

	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}
/**
 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
 * Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *
 * This is one of the procedures to set the QE bit described in the SFDP
 * (JESD216 rev B) specification but no manufacturer using this procedure has
 * been identified yet, hence the name of the function.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
};

static const struct flash_info spi_nor_generic_flash = {
	.name = "spi-nor-generic",
};
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
						 const u8 *id)
{
	const struct flash_info *part;
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			part = &manufacturers[i]->parts[j];
			if (part->id &&
			    !memcmp(part->id->bytes, id, part->id->len)) {
				nor->manufacturer = manufacturers[i];
				return part;
			}
		}
	}

	return NULL;
}
static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	int ret;

	ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* Cache the complete flash ID. */
	nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
	if (!nor->id)
		return ERR_PTR(-ENOMEM);

	info = spi_nor_match_id(nor, id);

	/* Fallback to a generic flash described only by its SFDP data. */
	if (!info) {
		ret = spi_nor_check_sfdp_signature(nor);
		if (!ret)
			info = &spi_nor_generic_flash;
	}

	if (!info) {
		dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
			SPI_NOR_MAX_ID_LEN, id);
		return ERR_PTR(-ENODEV);
	}

	return info;
}
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	loff_t from_lock = from;
	size_t len_lock = len;
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock);

	return ret;
}
/*
 * Write an address range to the nor chip.  Data must be written in
 * FLASH_PAGESIZE chunks.  The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t i;
	ssize_t ret;
	u32 page_size = nor->params->page_size;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_prep_and_lock_pe(nor, to, len);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;
		size_t page_offset = addr & (page_size - 1);
		/* the size of data remaining on the first page */
		size_t page_remain = min_t(size_t, page_size - page_offset, len - i);

		ret = spi_nor_lock_device(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			goto write_err;
		}

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		spi_nor_unlock_device(nor);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep_pe(nor, to, len);

	return ret;
}
static int spi_nor_check(struct spi_nor *nor)
{
	if (!nor->dev ||
	    (!nor->spimem && !nor->controller_ops) ||
	    (!nor->spimem && nor->controller_ops &&
	    (!nor->controller_ops->read ||
	     !nor->controller_ops->write ||
	     !nor->controller_ops->read_reg ||
	     !nor->controller_ops->write_reg))) {
		pr_err("spi-nor: please fill all the necessary fields!\n");
		return -EINVAL;
	}

	if (nor->spimem && nor->controller_ops) {
		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
		return -EINVAL;
	}

	return 0;
}
void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  u8 num_mode_clocks,
			  u8 num_wait_states,
			  u8 opcode,
			  enum spi_nor_protocol proto)
{
	read->num_mode_clocks = num_mode_clocks;
	read->num_wait_states = num_wait_states;
	read->opcode = opcode;
	read->proto = proto;
}

void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->opcode = opcode;
	pp->proto = proto;
}
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == (int)hwcaps)
			return table[i][1];

	return -EINVAL;
}
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
/**
 * spi_nor_spimem_check_op - check if the operation is supported
 *                           by controller
 *@nor:        pointer to a 'struct spi_nor'
 *@op:         pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	/*
	 * First test with 4 address bytes. The opcode itself might
	 * be a 3B addressing opcode but we don't care, because
	 * SPI controller implementation should not check the opcode,
	 * but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		if (nor->params->size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}
/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 *                               by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@read:        pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}
/**
 * spi_nor_spimem_check_pp - check if the page program op is supported
 *                           by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@pp:          pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode);

	spi_nor_spimem_setup_op(nor, &op, pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}
/**
 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
 *                                based on SPI controller capabilities
 * @nor:        pointer to a 'struct spi_nor'
 * @hwcaps:     pointer to resulting capabilities after adjusting
 *              according to controller and flash's capability
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	unsigned int cap;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * If the reset line is broken, we do not want to enter a stateful
	 * mode.
	 */
	if (nor->flags & SNOR_F_BROKEN_RESET)
		*hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}
/**
 * spi_nor_set_erase_type() - set a SPI NOR erase type
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @size:	the size of the sector/block erased by the erase type
 * @opcode:	the SPI command op code to erase the sector/block
 */
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
			    u8 opcode)
{
	erase->size = size;
	erase->opcode = opcode;
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	erase->size_shift = ffs(erase->size) - 1;
	erase->size_mask = (1 << erase->size_shift) - 1;
}
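
/*
 * Worked example (illustrative): for a 64 KiB erase type, size_shift =
 * ffs(0x10000) - 1 = 16 and size_mask = 0xffff, which lets
 * spi_nor_div_by_erase_size() replace the division with a shift and mask.
 */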
/**
 * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 */
void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase)
{
	erase->size = 0;
}
/**
 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
 * @map:		the erase map of the SPI NOR
 * @erase_mask:		bitmask encoding erase types that can erase the entire
 *			flash memory
 * @flash_size:		the spi nor flash memory size
 */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
				    u8 erase_mask, u64 flash_size)
{
	map->uniform_region.offset = 0;
	map->uniform_region.size = flash_size;
	map->uniform_region.erase_mask = erase_mask;
	map->regions = &map->uniform_region;
	map->n_regions = 1;
}
int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
			     const struct sfdp_parameter_header *bfpt_header,
			     const struct sfdp_bfpt *bfpt)
{
	int ret;

	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->post_bfpt) {
		ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
							   bfpt);
		if (ret)
			return ret;
	}

	if (nor->info->fixups && nor->info->fixups->post_bfpt)
		return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);

	return 0;
}
static int spi_nor_select_read(struct spi_nor *nor,
			       u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * In the SPI NOR framework, we don't need to make the difference
	 * between mode clock cycles and wait state clock cycles.
	 * Indeed, the value of the mode clock cycles is used by a QSPI
	 * flash memory to know whether it should enter or leave its 0-4-4
	 * (Continuous Read / XIP) mode.
	 * eXecution In Place is out of the scope of the mtd sub-system.
	 * Hence we choose to merge both mode and wait state clock cycles
	 * into the so called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}
static int spi_nor_select_pp(struct spi_nor *nor,
			     u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	const struct spi_nor_pp_command *pp;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &nor->params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;
	return 0;
}

/**
 * spi_nor_select_uniform_erase() - select optimum uniform erase type
 * @map:	the erase map of the SPI NOR
 *
 * Once the optimum uniform sector erase command is found, disable all the
 * other ones.
 *
 * Return: pointer to erase type on success, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_region.erase_mask;

	/*
	 * Search for the biggest erase size, except for when compiled
	 * to use 4k erases.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/* Skip masked erase types. */
		if (!tested_erase->size)
			continue;

		/*
		 * If the current erase size is the 4k one, stop here,
		 * we have found the right uniform Sector Erase command.
		 */
		if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
		    tested_erase->size == SZ_4K) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise, the current erase size is still a valid candidate.
		 * Select the biggest valid candidate.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;
			/* keep iterating to find the wanted_size */
	}

	if (!erase)
		return NULL;

	/* Disable all other Sector Erase commands. */
	map->uniform_region.erase_mask = BIT(erase - map->erase_type);
	return erase;
}
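
/*
 * Example (illustrative, not part of the original code): with both 4 KiB and
 * 64 KiB erase types present and CONFIG_MTD_SPI_NOR_USE_4K_SECTORS enabled,
 * the 4 KiB type is selected; otherwise the biggest type (64 KiB) wins.
 * BIT(erase - map->erase_type) then leaves only the selected type in the
 * uniform region's erase_mask.
 */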

static int spi_nor_select_erase(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	int i;

	/*
	 * The previous implementation handling Sector Erase commands assumed
	 * that the SPI flash memory has a uniform layout and then used only
	 * one of the supported erase sizes for all Sector Erase commands.
	 * So to be backward compatible, the new implementation also tries to
	 * manage the SPI flash memory as uniform with a single erase sector
	 * size, when possible.
	 */
	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * For non-uniform SPI flash memory, set mtd->erasesize to the
	 * maximum erase sector size. No need to set nor->erase_opcode.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}

static int spi_nor_set_addr_nbytes(struct spi_nor *nor)
{
	if (nor->params->addr_nbytes) {
		nor->addr_nbytes = nor->params->addr_nbytes;
	} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
		/*
		 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
		 * in this protocol an odd addr_nbytes cannot be used because
		 * then the address phase would only span a cycle and a half.
		 * Half a cycle would be left over. We would then have to start
		 * the dummy phase in the middle of a cycle and so too the data
		 * phase, and we will end the transaction with half a cycle left
		 * over.
		 *
		 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
		 * avoid this situation.
		 */
		nor->addr_nbytes = 4;
	} else if (nor->info->addr_nbytes) {
		nor->addr_nbytes = nor->info->addr_nbytes;
	} else {
		nor->addr_nbytes = 3;
	}

	if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_nbytes = 4;
	}

	if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) {
		dev_dbg(nor->dev, "The number of address bytes is too large: %u\n",
			nor->addr_nbytes);
		return -EINVAL;
	}

	/* Set 4byte opcodes when possible. */
	if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES &&
	    !(nor->flags & SNOR_F_HAS_4BAIT))
		spi_nor_set_4byte_opcodes(nor);

	return 0;
}
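
/*
 * Example (illustrative, not part of the original code): a 32 MiB flash
 * (0x2000000 bytes) described with the default 3 address bytes exceeds the
 * 16 MiB (0x1000000) boundary, so addr_nbytes is bumped to 4 here.
 */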

static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/*
		 * When called from spi_nor_probe(), all caps are set and we
		 * need to discard some of them based on what the SPI
		 * controller actually supports (using spi_mem_supports_op()).
		 */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * SPI n-n-n protocols are not supported when the SPI
		 * controller directly implements the spi_nor interface.
		 * Yet another reason to switch to spi-mem.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return spi_nor_set_addr_nbytes(nor);
}

/**
 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
 * settings based on MFR register and ->default_init() hook.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->default_init)
		nor->manufacturer->fixups->default_init(nor);

	if (nor->info->fixups && nor->info->fixups->default_init)
		nor->info->fixups->default_init(nor);
}

/**
 * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
 * settings based on nor->info->no_sfdp_flags. This method should be called
 * only by flashes that do not define SFDP tables. If the flash supports SFDP
 * but the information is wrong and the settings from this function can not be
 * retrieved by parsing SFDP, one should instead use the fixup hooks and update
 * the wrong bits.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	const u8 no_sfdp_flags = info->no_sfdp_flags;
	u8 i, erase_mask;

	if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (no_sfdp_flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (no_sfdp_flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i],
			       info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
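
/*
 * Example (illustrative, not part of the original code): a flash_info entry
 * declaring SECT_4K ends up with erase type BIT(0) = 4 KiB and BIT(1) = the
 * declared (or default) sector size, both in a uniform erase map that covers
 * the whole device.
 */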

/**
 * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
 * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP.
 * @nor:	pointer to a 'struct spi_nor'
 */
static void spi_nor_init_flags(struct spi_nor *nor)
{
	struct device_node *np = spi_nor_get_flash_node(nor);
	const u16 flags = nor->info->flags;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	if (of_property_read_bool(np, "no-wp"))
		nor->flags |= SNOR_F_NO_WP;

	if (flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	if (flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
	    !nor->controller_ops)
		nor->flags |= SNOR_F_RWW;
}

/**
 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
 * be discovered by SFDP for this particular flash because the SFDP table that
 * indicates this support is not defined in the flash. In case the table for
 * this support is defined but has wrong values, one should instead use a
 * post_sfdp() hook to set the SNOR_F equivalent flag.
 * @nor:	pointer to a 'struct spi_nor'
 */
static void spi_nor_init_fixup_flags(struct spi_nor *nor)
{
	const u8 fixup_flags = nor->info->fixup_flags;

	if (fixup_flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE)
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;
}

/**
 * spi_nor_late_init_params() - Late initialization of default flash parameters.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Used to initialize flash parameters that are not declared in the JESD216
 * SFDP standard, or where SFDP tables are not defined at all.
 * Will replace the spi_nor_manufacturer_init_params() method.
 */
static int spi_nor_late_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	int ret;

	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->late_init) {
		ret = nor->manufacturer->fixups->late_init(nor);
		if (ret)
			return ret;
	}

	/* Needed by some flashes late_init hooks. */
	spi_nor_init_flags(nor);

	if (nor->info->fixups && nor->info->fixups->late_init) {
		ret = nor->info->fixups->late_init(nor);
		if (ret)
			return ret;
	}

	if (!nor->params->die_erase_opcode)
		nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE;

	/* Default method kept for backward compatibility. */
	if (!params->set_4byte_addr_mode)
		params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr;

	spi_nor_init_fixup_flags(nor);

	/*
	 * NOR protection support. When locking_ops are not provided, we pick
	 * the default ones.
	 */
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		spi_nor_init_default_locking_ops(nor);

	if (params->n_banks > 1)
		params->bank_size = div_u64(params->size, params->n_banks);

	return 0;
}
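
/*
 * Example (illustrative, not part of the original code): a 32 MiB flash with
 * n_banks = 2 gets bank_size = div_u64(32 MiB, 2) = 16 MiB.
 */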

/**
 * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
 * parameters and settings based on JESD216 SFDP standard.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
 * legacy flash parameters and settings will be restored.
 */
static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor)) {
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
		nor->flags &= ~SNOR_F_4B_OPCODES;
	}
}

/**
 * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
 * parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method assumes that flash doesn't support SFDP so it initializes flash
 * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten
 * when parsing SFDP, if supported.
 */
static void spi_nor_init_params_deprecated(struct spi_nor *nor)
{
	spi_nor_no_sfdp_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ |
					SPI_NOR_QUAD_READ |
					SPI_NOR_OCTAL_READ |
					SPI_NOR_OCTAL_DTR_READ))
		spi_nor_sfdp_init_params_deprecated(nor);
}

/**
 * spi_nor_init_default_params() - Default initialization of flash parameters
 * and settings. Done for all flashes, regardless of whether they define SFDP
 * tables or not.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_init_default_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);

	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->otp.org = info->otp;

	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = info->size;
	params->bank_size = params->size;
	params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
	params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;

	/* Default to Fast Read for non-DT and enable it if requested by DT. */
	if (!np || of_property_read_bool(np, "m25p,fast-read"))
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_QUAD_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
	}
}

/**
 * spi_nor_init_params() - Initialize the flash's parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The flash parameters and settings are initialized based on a sequence of
 * calls that are ordered by priority:
 *
 * 1/ Default flash parameters initialization. The initializations are done
 *    based on nor->info data:
 *		spi_nor_init_default_params()
 *
 * which can be overwritten by:
 * 2/ Manufacturer flash parameters initialization. The initializations are
 *    done based on MFR register, or when the decisions can not be done solely
 *    based on MFR, by using specific flash_info tweaks, ->default_init():
 *		spi_nor_manufacturer_init_params()
 *
 * which can be overwritten by:
 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
 *    should be more accurate than the above:
 *		spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
 *
 *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
 *    the flash parameters and settings immediately after parsing the Basic
 *    Flash Parameter Table.
 *    spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
 *    It is used to tweak various flash parameters when information provided
 *    by the SFDP tables is wrong.
 *
 * which can be overwritten by:
 * 4/ Late flash parameters initialization, used to initialize flash
 *    parameters that are not declared in the JESD216 SFDP standard, or where
 *    SFDP tables are not defined at all:
 *		spi_nor_late_init_params()
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_params(struct spi_nor *nor)
{
	int ret;

	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
	if (!nor->params)
		return -ENOMEM;

	spi_nor_init_default_params(nor);

	if (spi_nor_needs_sfdp(nor)) {
		ret = spi_nor_parse_sfdp(nor);
		if (ret) {
			dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
			return ret;
		}
	} else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) {
		spi_nor_no_sfdp_init_params(nor);
	} else {
		spi_nor_init_params_deprecated(nor);
	}

	ret = spi_nor_late_init_params(nor);
	if (ret)
		return ret;

	if (WARN_ON(!is_power_of_2(nor->params->page_size)))
		return -EINVAL;

	return 0;
}

/**
 * spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	whether to enable or disable Octal DTR
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
	int ret;

	if (!nor->params->set_octal_dtr)
		return 0;

	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
		return 0;

	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->set_octal_dtr(nor, enable);
	if (ret)
		return ret;

	if (enable)
		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
	else
		nor->reg_proto = SNOR_PROTO_1_1_1;

	return 0;
}

/**
 * spi_nor_quad_enable() - enable Quad I/O if needed.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_quad_enable(struct spi_nor *nor)
{
	if (!nor->params->quad_enable)
		return 0;

	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
	      spi_nor_get_protocol_width(nor->write_proto) == 4))
		return 0;

	return nor->params->quad_enable(nor);
}

/**
 * spi_nor_set_4byte_addr_mode() - Set address mode.
 * @nor:	pointer to a 'struct spi_nor'.
 * @enable:	enable/disable 4 byte address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	struct spi_nor_flash_parameter *params = nor->params;
	int ret;

	if (enable) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
	}

	ret = params->set_4byte_addr_mode(nor, enable);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	if (enable) {
		params->addr_nbytes = 4;
		params->addr_mode_nbytes = 4;
	} else {
		params->addr_nbytes = 3;
		params->addr_mode_nbytes = 3;
	}

	return 0;
}

static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_set_octal_dtr(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a power-on
	 * reset cycle, in order to avoid inadvertent writes during power-up.
	 * Backward compatibility imposes to unlock the entire flash memory
	 * array at power-up by default. Depending on the kernel configuration
	 * (1) do nothing, (2) always unlock the entire flash array or (3)
	 * unlock the entire flash array only when the software write
	 * protection bits are volatile. The latter is indicated by
	 * SNOR_F_SWP_IS_VOLATILE.
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	if (nor->addr_nbytes == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES))
		return spi_nor_set_4byte_addr_mode(nor, true);

	return 0;
}

/**
 * spi_nor_soft_reset() - Perform a software reset
 * @nor:	pointer to 'struct spi_nor'
 *
 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
 * the device to its power-on-reset state. This is useful when the software has
 * made some changes to device (volatile) registers and needs to reset it before
 * shutting down, for example.
 *
 * Not every flash supports this sequence. The same set of opcodes might be used
 * for some other operation on a flash that does not support this. Support for
 * this sequence can be discovered via SFDP in the BFPT table.
 */
static void spi_nor_soft_reset(struct spi_nor *nor)
{
	struct spi_mem_op op;
	int ret;

	op = (struct spi_mem_op)SPINOR_SRSTEN_OP;

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	op = (struct spi_mem_op)SPINOR_SRST_OP;

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/*
	 * Software Reset is not instant, and the delay varies from flash to
	 * flash. Looking at a few flashes, most range somewhere below 100
	 * microseconds. So, sleep for a range of 200-400 us.
	 */
	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}

/* mtd suspend handler */
static int spi_nor_suspend(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	/* Disable octal DTR mode if we enabled it. */
	ret = spi_nor_set_octal_dtr(nor, false);
	if (ret)
		dev_err(nor->dev, "suspend() failed\n");

	return ret;
}

/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	struct device *dev = nor->dev;
	int ret;

	/* re-initialize the nor chip */
	ret = spi_nor_init(nor);
	if (ret)
		dev_err(dev, "resume() failed\n");
}

static int spi_nor_get_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct spi_nor *nor = mtd_to_spi_nor(master);
	struct device *dev;

	if (nor->spimem)
		dev = nor->spimem->spi->controller->dev.parent;
	else
		dev = nor->dev;

	if (!try_module_get(dev->driver->owner))
		return -ENODEV;

	return 0;
}

static void spi_nor_put_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct spi_nor *nor = mtd_to_spi_nor(master);
	struct device *dev;

	if (nor->spimem)
		dev = nor->spimem->spi->controller->dev.parent;
	else
		dev = nor->dev;

	module_put(dev->driver->owner);
}

static void spi_nor_restore(struct spi_nor *nor)
{
	int ret;

	/* restore the addressing mode */
	if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET) {
		ret = spi_nor_set_4byte_addr_mode(nor, false);
		if (ret)
			/*
			 * Do not stop the execution in the hope that the flash
			 * will default to the 3-byte address mode after the
			 * software reset.
			 */
			dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
	}

	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}

static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
						   const char *name)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			if (manufacturers[i]->parts[j].name &&
			    !strcmp(name, manufacturers[i]->parts[j].name)) {
				nor->manufacturer = manufacturers[i];
				return &manufacturers[i]->parts[j];
			}
		}
	}

	return NULL;
}

static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_name(nor, name);
	/*
	 * Auto-detect if chip name wasn't specified or not found, or the chip
	 * has an ID. If the chip supposedly has an ID, we also do an
	 * auto-detection to compare it later.
	 */
	if (!info || info->id) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_detect(nor);
		if (IS_ERR(jinfo))
			return jinfo;

		/*
		 * If caller has specified name of flash model that can normally
		 * be detected using JEDEC, let's verify it.
		 */
		if (info && jinfo != info)
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);

		/* If info was set before, JEDEC knows better. */
		info = jinfo;
	}

	return info;
}

static u32
spi_nor_get_region_erasesize(const struct spi_nor_erase_region *region,
			     const struct spi_nor_erase_type *erase_type)
{
	int i;

	if (region->overlaid)
		return region->size;

	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (region->erase_mask & BIT(i))
			return erase_type[i].size;
	}

	return 0;
}

static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_region *region = map->regions;
	struct mtd_erase_region_info *mtd_region;
	struct mtd_info *mtd = &nor->mtd;
	u32 erasesize, i;

	mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
				  GFP_KERNEL);
	if (!mtd_region)
		return -ENOMEM;

	for (i = 0; i < map->n_regions; i++) {
		erasesize = spi_nor_get_region_erasesize(&region[i],
							 map->erase_type);
		if (!erasesize)
			return -EINVAL;

		mtd_region[i].erasesize = erasesize;
		mtd_region[i].numblocks = div_u64(region[i].size, erasesize);
		mtd_region[i].offset = region[i].offset;
	}

	mtd->numeraseregions = map->n_regions;
	mtd->eraseregions = mtd_region;

	return 0;
}

static int spi_nor_set_mtd_info(struct spi_nor *nor)
{
	struct mtd_info *mtd = &nor->mtd;
	struct device *dev = nor->dev;

	spi_nor_set_mtd_locking_ops(nor);
	spi_nor_set_mtd_otp_ops(nor);

	mtd->dev.parent = dev;
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	/* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
	if (nor->flags & SNOR_F_ECC)
		mtd->flags &= ~MTD_BIT_WRITEABLE;
	if (nor->info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;
	else
		mtd->_erase = spi_nor_erase;
	mtd->writesize = nor->params->writesize;
	mtd->writebufsize = nor->params->page_size;
	mtd->size = nor->params->size;
	mtd->_read = spi_nor_read;
	/* Might be already set by some SST flashes. */
	if (!mtd->_write)
		mtd->_write = spi_nor_write;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;

	if (!spi_nor_has_uniform_erase(nor))
		return spi_nor_set_mtd_eraseregions(nor);

	return 0;
}

static int spi_nor_hw_reset(struct spi_nor *nor)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR_OR_NULL(reset))
		return PTR_ERR_OR_ZERO(reset);

	/*
	 * Experimental delay values, based on the datasheets of different
	 * flash device vendors.
	 */
	gpiod_set_value_cansleep(reset, 1);
	usleep_range(100, 150);
	gpiod_set_value_cansleep(reset, 0);
	usleep_range(1000, 1200);

	return 0;
}

int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	int ret;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	ret = spi_nor_hw_reset(nor);
	if (ret)
		return ret;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	mutex_init(&nor->lock);

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		init_waitqueue_head(&nor->rww.wait);

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - set the number of address bytes.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* No mtd_info fields should be used up to this point. */
	ret = spi_nor_set_mtd_info(nor);
	if (ret)
		return ret;

	dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
		SPI_NOR_MAX_ID_LEN, nor->id);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);

static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}
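
/*
 * Example (illustrative, not part of the original code): with read_dummy = 8
 * clock cycles and a 4-bit wide dummy phase (e.g. a 1-4-4 read), the template
 * gets op->dummy.nbytes = 8 * 4 / 8 = 4 bytes; for DTR protocols the count is
 * doubled because two bytes are transferred per clock cycle.
 */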

static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}

static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	spi_nor_debugfs_register(nor);

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->params->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->params->page_size;
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}

static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}

static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}

/*
 * Do NOT add to this array without reading the following:
 *
 * Historically, many flash devices are bound to this driver by their name. But
 * since most of these flash are compatible to some extent, and their
 * differences can often be differentiated by the JEDEC read-ID command, we
 * encourage new users to add support to the spi-nor library, and simply bind
 * against a generic string here (e.g., "jedec,spi-nor").
 *
 * Many flash names are kept here in this list to keep them available
 * as module aliases for existing platforms.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);

static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);

/*
 * REVISIT: many of these chips have deep power-down modes, which
 * should clearly be entered on suspend() to minimize power use.
 * And also when they're otherwise idle...
 */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
			.dev_groups = spi_nor_sysfs_groups,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};

static int __init spi_nor_module_init(void)
{
	return spi_mem_driver_register(&spi_nor_driver);
}
module_init(spi_nor_module_init);

static void __exit spi_nor_module_exit(void)
{
	spi_mem_driver_unregister(&spi_nor_driver);
	spi_nor_debugfs_shutdown();
}
module_exit(spi_nor_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");