1 // SPDX-License-Identifier: GPL-2.0
3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
6 * Copyright (C) 2005, Intec Automation Inc.
7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/delay.h>
13 #include <linux/device.h>
14 #include <linux/math64.h>
15 #include <linux/module.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/mtd/spi-nor.h>
18 #include <linux/mutex.h>
19 #include <linux/of_platform.h>
20 #include <linux/sched/task_stack.h>
21 #include <linux/sizes.h>
22 #include <linux/slab.h>
23 #include <linux/spi/flash.h>
27 /* Define max times to check status register before we give up. */
30 * For everything but full-chip erase; probably could be much smaller, but kept
31 * around for safety for now
33 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
36 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
39 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
41 #define SPI_NOR_MAX_ADDR_NBYTES 4
43 #define SPI_NOR_SRST_SLEEP_MIN 200
44 #define SPI_NOR_SRST_SLEEP_MAX 400
47 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the
49 * @nor: pointer to a 'struct spi_nor'
50 * @op: pointer to the 'struct spi_mem_op' whose properties
51 * need to be initialized.
53 * Right now, only "repeat" and "invert" are supported.
55 * Return: The opcode extension.
57 static u8
spi_nor_get_cmd_ext(const struct spi_nor
*nor
,
58 const struct spi_mem_op
*op
)
60 switch (nor
->cmd_ext_type
) {
61 case SPI_NOR_EXT_INVERT
:
62 return ~op
->cmd
.opcode
;
64 case SPI_NOR_EXT_REPEAT
:
65 return op
->cmd
.opcode
;
68 dev_err(nor
->dev
, "Unknown command extension type\n");
74 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
75 * @nor: pointer to a 'struct spi_nor'
76 * @op: pointer to the 'struct spi_mem_op' whose properties
77 * need to be initialized.
78 * @proto: the protocol from which the properties need to be set.
80 void spi_nor_spimem_setup_op(const struct spi_nor
*nor
,
81 struct spi_mem_op
*op
,
82 const enum spi_nor_protocol proto
)
86 op
->cmd
.buswidth
= spi_nor_get_protocol_inst_nbits(proto
);
89 op
->addr
.buswidth
= spi_nor_get_protocol_addr_nbits(proto
);
92 op
->dummy
.buswidth
= spi_nor_get_protocol_data_nbits(proto
);
95 op
->data
.buswidth
= spi_nor_get_protocol_data_nbits(proto
);
97 if (spi_nor_protocol_is_dtr(proto
)) {
99 * SPIMEM supports mixed DTR modes, but right now we can only
100 * have all phases either DTR or STR. IOW, SPIMEM can have
101 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
102 * phases to either DTR or STR.
106 op
->dummy
.dtr
= true;
109 /* 2 bytes per clock cycle in DTR mode. */
110 op
->dummy
.nbytes
*= 2;
112 ext
= spi_nor_get_cmd_ext(nor
, op
);
113 op
->cmd
.opcode
= (op
->cmd
.opcode
<< 8) | ext
;
117 if (proto
== SNOR_PROTO_8_8_8_DTR
&& nor
->flags
& SNOR_F_SWAP16
)
118 op
->data
.swap16
= true;
122 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
124 * @nor: pointer to 'struct spi_nor'
125 * @op: pointer to 'struct spi_mem_op' template for transfer
127 * If we have to use the bounce buffer, the data field in @op will be updated.
129 * Return: true if the bounce buffer is needed, false if not
131 static bool spi_nor_spimem_bounce(struct spi_nor
*nor
, struct spi_mem_op
*op
)
133 /* op->data.buf.in occupies the same memory as op->data.buf.out */
134 if (object_is_on_stack(op
->data
.buf
.in
) ||
135 !virt_addr_valid(op
->data
.buf
.in
)) {
136 if (op
->data
.nbytes
> nor
->bouncebuf_size
)
137 op
->data
.nbytes
= nor
->bouncebuf_size
;
138 op
->data
.buf
.in
= nor
->bouncebuf
;
146 * spi_nor_spimem_exec_op() - execute a memory operation
147 * @nor: pointer to 'struct spi_nor'
148 * @op: pointer to 'struct spi_mem_op' template for transfer
150 * Return: 0 on success, -error otherwise.
152 static int spi_nor_spimem_exec_op(struct spi_nor
*nor
, struct spi_mem_op
*op
)
156 error
= spi_mem_adjust_op_size(nor
->spimem
, op
);
160 return spi_mem_exec_op(nor
->spimem
, op
);
163 int spi_nor_controller_ops_read_reg(struct spi_nor
*nor
, u8 opcode
,
166 if (spi_nor_protocol_is_dtr(nor
->reg_proto
))
169 return nor
->controller_ops
->read_reg(nor
, opcode
, buf
, len
);
172 int spi_nor_controller_ops_write_reg(struct spi_nor
*nor
, u8 opcode
,
173 const u8
*buf
, size_t len
)
175 if (spi_nor_protocol_is_dtr(nor
->reg_proto
))
178 return nor
->controller_ops
->write_reg(nor
, opcode
, buf
, len
);
181 static int spi_nor_controller_ops_erase(struct spi_nor
*nor
, loff_t offs
)
183 if (spi_nor_protocol_is_dtr(nor
->reg_proto
))
186 return nor
->controller_ops
->erase(nor
, offs
);
190 * spi_nor_spimem_read_data() - read data from flash's memory region via
192 * @nor: pointer to 'struct spi_nor'
193 * @from: offset to read from
194 * @len: number of bytes to read
195 * @buf: pointer to dst buffer
197 * Return: number of bytes read successfully, -errno otherwise
199 static ssize_t
spi_nor_spimem_read_data(struct spi_nor
*nor
, loff_t from
,
202 struct spi_mem_op op
=
203 SPI_MEM_OP(SPI_MEM_OP_CMD(nor
->read_opcode
, 0),
204 SPI_MEM_OP_ADDR(nor
->addr_nbytes
, from
, 0),
205 SPI_MEM_OP_DUMMY(nor
->read_dummy
, 0),
206 SPI_MEM_OP_DATA_IN(len
, buf
, 0));
211 spi_nor_spimem_setup_op(nor
, &op
, nor
->read_proto
);
213 /* convert the dummy cycles to the number of bytes */
214 op
.dummy
.nbytes
= (nor
->read_dummy
* op
.dummy
.buswidth
) / 8;
215 if (spi_nor_protocol_is_dtr(nor
->read_proto
))
216 op
.dummy
.nbytes
*= 2;
218 usebouncebuf
= spi_nor_spimem_bounce(nor
, &op
);
220 if (nor
->dirmap
.rdesc
) {
221 nbytes
= spi_mem_dirmap_read(nor
->dirmap
.rdesc
, op
.addr
.val
,
222 op
.data
.nbytes
, op
.data
.buf
.in
);
224 error
= spi_nor_spimem_exec_op(nor
, &op
);
227 nbytes
= op
.data
.nbytes
;
230 if (usebouncebuf
&& nbytes
> 0)
231 memcpy(buf
, op
.data
.buf
.in
, nbytes
);
237 * spi_nor_read_data() - read data from flash memory
238 * @nor: pointer to 'struct spi_nor'
239 * @from: offset to read from
240 * @len: number of bytes to read
241 * @buf: pointer to dst buffer
243 * Return: number of bytes read successfully, -errno otherwise
245 ssize_t
spi_nor_read_data(struct spi_nor
*nor
, loff_t from
, size_t len
, u8
*buf
)
248 return spi_nor_spimem_read_data(nor
, from
, len
, buf
);
250 return nor
->controller_ops
->read(nor
, from
, len
, buf
);
254 * spi_nor_spimem_write_data() - write data to flash memory via
256 * @nor: pointer to 'struct spi_nor'
257 * @to: offset to write to
258 * @len: number of bytes to write
259 * @buf: pointer to src buffer
261 * Return: number of bytes written successfully, -errno otherwise
263 static ssize_t
spi_nor_spimem_write_data(struct spi_nor
*nor
, loff_t to
,
264 size_t len
, const u8
*buf
)
266 struct spi_mem_op op
=
267 SPI_MEM_OP(SPI_MEM_OP_CMD(nor
->program_opcode
, 0),
268 SPI_MEM_OP_ADDR(nor
->addr_nbytes
, to
, 0),
270 SPI_MEM_OP_DATA_OUT(len
, buf
, 0));
274 if (nor
->program_opcode
== SPINOR_OP_AAI_WP
&& nor
->sst_write_second
)
277 spi_nor_spimem_setup_op(nor
, &op
, nor
->write_proto
);
279 if (spi_nor_spimem_bounce(nor
, &op
))
280 memcpy(nor
->bouncebuf
, buf
, op
.data
.nbytes
);
282 if (nor
->dirmap
.wdesc
) {
283 nbytes
= spi_mem_dirmap_write(nor
->dirmap
.wdesc
, op
.addr
.val
,
284 op
.data
.nbytes
, op
.data
.buf
.out
);
286 error
= spi_nor_spimem_exec_op(nor
, &op
);
289 nbytes
= op
.data
.nbytes
;
296 * spi_nor_write_data() - write data to flash memory
297 * @nor: pointer to 'struct spi_nor'
298 * @to: offset to write to
299 * @len: number of bytes to write
300 * @buf: pointer to src buffer
302 * Return: number of bytes written successfully, -errno otherwise
304 ssize_t
spi_nor_write_data(struct spi_nor
*nor
, loff_t to
, size_t len
,
308 return spi_nor_spimem_write_data(nor
, to
, len
, buf
);
310 return nor
->controller_ops
->write(nor
, to
, len
, buf
);
314 * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
316 * @nor: pointer to 'struct spi_nor'.
317 * @op: SPI memory operation. op->data.buf must be DMA-able.
318 * @proto: SPI protocol to use for the register operation.
320 * Return: zero on success, -errno otherwise
322 int spi_nor_read_any_reg(struct spi_nor
*nor
, struct spi_mem_op
*op
,
323 enum spi_nor_protocol proto
)
328 spi_nor_spimem_setup_op(nor
, op
, proto
);
329 return spi_nor_spimem_exec_op(nor
, op
);
333 * spi_nor_write_any_volatile_reg() - write any volatile register to flash
335 * @nor: pointer to 'struct spi_nor'
336 * @op: SPI memory operation. op->data.buf must be DMA-able.
337 * @proto: SPI protocol to use for the register operation.
339 * Writing volatile registers are instant according to some manufacturers
340 * (Cypress, Micron) and do not need any status polling.
342 * Return: zero on success, -errno otherwise
344 int spi_nor_write_any_volatile_reg(struct spi_nor
*nor
, struct spi_mem_op
*op
,
345 enum spi_nor_protocol proto
)
352 ret
= spi_nor_write_enable(nor
);
355 spi_nor_spimem_setup_op(nor
, op
, proto
);
356 return spi_nor_spimem_exec_op(nor
, op
);
360 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
361 * @nor: pointer to 'struct spi_nor'.
363 * Return: 0 on success, -errno otherwise.
365 int spi_nor_write_enable(struct spi_nor
*nor
)
370 struct spi_mem_op op
= SPI_NOR_WREN_OP
;
372 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
374 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
376 ret
= spi_nor_controller_ops_write_reg(nor
, SPINOR_OP_WREN
,
381 dev_dbg(nor
->dev
, "error %d on Write Enable\n", ret
);
387 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
388 * @nor: pointer to 'struct spi_nor'.
390 * Return: 0 on success, -errno otherwise.
392 int spi_nor_write_disable(struct spi_nor
*nor
)
397 struct spi_mem_op op
= SPI_NOR_WRDI_OP
;
399 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
401 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
403 ret
= spi_nor_controller_ops_write_reg(nor
, SPINOR_OP_WRDI
,
408 dev_dbg(nor
->dev
, "error %d on Write Disable\n", ret
);
414 * spi_nor_read_id() - Read the JEDEC ID.
415 * @nor: pointer to 'struct spi_nor'.
416 * @naddr: number of address bytes to send. Can be zero if the operation
417 * does not need to send an address.
418 * @ndummy: number of dummy bytes to send after an opcode or address. Can
419 * be zero if the operation does not require dummy bytes.
420 * @id: pointer to a DMA-able buffer where the value of the JEDEC ID
422 * @proto: the SPI protocol for register operation.
424 * Return: 0 on success, -errno otherwise.
426 int spi_nor_read_id(struct spi_nor
*nor
, u8 naddr
, u8 ndummy
, u8
*id
,
427 enum spi_nor_protocol proto
)
432 struct spi_mem_op op
=
433 SPI_NOR_READID_OP(naddr
, ndummy
, id
, SPI_NOR_MAX_ID_LEN
);
435 spi_nor_spimem_setup_op(nor
, &op
, proto
);
436 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
438 ret
= nor
->controller_ops
->read_reg(nor
, SPINOR_OP_RDID
, id
,
445 * spi_nor_read_sr() - Read the Status Register.
446 * @nor: pointer to 'struct spi_nor'.
447 * @sr: pointer to a DMA-able buffer where the value of the
448 * Status Register will be written. Should be at least 2 bytes.
450 * Return: 0 on success, -errno otherwise.
452 int spi_nor_read_sr(struct spi_nor
*nor
, u8
*sr
)
457 struct spi_mem_op op
= SPI_NOR_RDSR_OP(sr
);
459 if (nor
->reg_proto
== SNOR_PROTO_8_8_8_DTR
) {
460 op
.addr
.nbytes
= nor
->params
->rdsr_addr_nbytes
;
461 op
.dummy
.nbytes
= nor
->params
->rdsr_dummy
;
463 * We don't want to read only one byte in DTR mode. So,
464 * read 2 and then discard the second byte.
469 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
471 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
473 ret
= spi_nor_controller_ops_read_reg(nor
, SPINOR_OP_RDSR
, sr
,
478 dev_dbg(nor
->dev
, "error %d reading SR\n", ret
);
484 * spi_nor_read_cr() - Read the Configuration Register using the
485 * SPINOR_OP_RDCR (35h) command.
486 * @nor: pointer to 'struct spi_nor'
487 * @cr: pointer to a DMA-able buffer where the value of the
488 * Configuration Register will be written.
490 * Return: 0 on success, -errno otherwise.
492 int spi_nor_read_cr(struct spi_nor
*nor
, u8
*cr
)
497 struct spi_mem_op op
= SPI_NOR_RDCR_OP(cr
);
499 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
501 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
503 ret
= spi_nor_controller_ops_read_reg(nor
, SPINOR_OP_RDCR
, cr
,
508 dev_dbg(nor
->dev
, "error %d reading CR\n", ret
);
514 * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
515 * using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
516 * Winbond and Macronix.
517 * @nor: pointer to 'struct spi_nor'.
518 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
521 * Return: 0 on success, -errno otherwise.
523 int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor
*nor
, bool enable
)
528 struct spi_mem_op op
= SPI_NOR_EN4B_EX4B_OP(enable
);
530 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
532 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
534 ret
= spi_nor_controller_ops_write_reg(nor
,
535 enable
? SPINOR_OP_EN4B
:
541 dev_dbg(nor
->dev
, "error %d setting 4-byte mode\n", ret
);
547 * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
548 * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
549 * by ST and Micron flashes.
550 * @nor: pointer to 'struct spi_nor'.
551 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
554 * Return: 0 on success, -errno otherwise.
556 int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor
*nor
, bool enable
)
560 ret
= spi_nor_write_enable(nor
);
564 ret
= spi_nor_set_4byte_addr_mode_en4b_ex4b(nor
, enable
);
568 return spi_nor_write_disable(nor
);
572 * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
573 * SPINOR_OP_BRWR. Typically used by Spansion flashes.
574 * @nor: pointer to 'struct spi_nor'.
575 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
578 * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is
579 * used to enable/disable 4-byte address mode. When MSB is set to ‘1’, 4-byte
580 * address mode is active and A[30:24] bits are don’t care. Write instruction is
581 * SPINOR_OP_BRWR(17h) with 1 byte of data.
583 * Return: 0 on success, -errno otherwise.
585 int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor
*nor
, bool enable
)
589 nor
->bouncebuf
[0] = enable
<< 7;
592 struct spi_mem_op op
= SPI_NOR_BRWR_OP(nor
->bouncebuf
);
594 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
596 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
598 ret
= spi_nor_controller_ops_write_reg(nor
, SPINOR_OP_BRWR
,
603 dev_dbg(nor
->dev
, "error %d setting 4-byte mode\n", ret
);
609 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
611 * @nor: pointer to 'struct spi_nor'.
613 * Return: 1 if ready, 0 if not ready, -errno on errors.
615 int spi_nor_sr_ready(struct spi_nor
*nor
)
619 ret
= spi_nor_read_sr(nor
, nor
->bouncebuf
);
623 return !(nor
->bouncebuf
[0] & SR_WIP
);
627 * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
628 * @nor: pointer to 'struct spi_nor'.
630 * Return: true if parallel locking is enabled, false otherwise.
632 static bool spi_nor_use_parallel_locking(struct spi_nor
*nor
)
634 return nor
->flags
& SNOR_F_RWW
;
637 /* Locking helpers for status read operations */
638 static int spi_nor_rww_start_rdst(struct spi_nor
*nor
)
640 struct spi_nor_rww
*rww
= &nor
->rww
;
643 mutex_lock(&nor
->lock
);
645 if (rww
->ongoing_io
|| rww
->ongoing_rd
)
648 rww
->ongoing_io
= true;
649 rww
->ongoing_rd
= true;
653 mutex_unlock(&nor
->lock
);
657 static void spi_nor_rww_end_rdst(struct spi_nor
*nor
)
659 struct spi_nor_rww
*rww
= &nor
->rww
;
661 mutex_lock(&nor
->lock
);
663 rww
->ongoing_io
= false;
664 rww
->ongoing_rd
= false;
666 mutex_unlock(&nor
->lock
);
/* Take the status-read lock only when the RWW locking scheme is in use. */
static int spi_nor_lock_rdst(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor))
		return spi_nor_rww_start_rdst(nor);

	return 0;
}
677 static void spi_nor_unlock_rdst(struct spi_nor
*nor
)
679 if (spi_nor_use_parallel_locking(nor
)) {
680 spi_nor_rww_end_rdst(nor
);
681 wake_up(&nor
->rww
.wait
);
686 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
687 * @nor: pointer to 'struct spi_nor'.
689 * Return: 1 if ready, 0 if not ready, -errno on errors.
691 static int spi_nor_ready(struct spi_nor
*nor
)
695 ret
= spi_nor_lock_rdst(nor
);
699 /* Flashes might override the standard routine. */
700 if (nor
->params
->ready
)
701 ret
= nor
->params
->ready(nor
);
703 ret
= spi_nor_sr_ready(nor
);
705 spi_nor_unlock_rdst(nor
);
711 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
712 * Status Register until ready, or timeout occurs.
713 * @nor: pointer to "struct spi_nor".
714 * @timeout_jiffies: jiffies to wait until timeout.
716 * Return: 0 on success, -errno otherwise.
718 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor
*nor
,
719 unsigned long timeout_jiffies
)
721 unsigned long deadline
;
722 int timeout
= 0, ret
;
724 deadline
= jiffies
+ timeout_jiffies
;
727 if (time_after_eq(jiffies
, deadline
))
730 ret
= spi_nor_ready(nor
);
739 dev_dbg(nor
->dev
, "flash operation timed out\n");
745 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
746 * flash to be ready, or timeout occurs.
747 * @nor: pointer to "struct spi_nor".
749 * Return: 0 on success, -errno otherwise.
751 int spi_nor_wait_till_ready(struct spi_nor
*nor
)
753 return spi_nor_wait_till_ready_with_timeout(nor
,
754 DEFAULT_READY_WAIT_JIFFIES
);
758 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
759 * @nor: pointer to 'struct spi_nor'.
761 * Return: 0 on success, -errno otherwise.
763 int spi_nor_global_block_unlock(struct spi_nor
*nor
)
767 ret
= spi_nor_write_enable(nor
);
772 struct spi_mem_op op
= SPI_NOR_GBULK_OP
;
774 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
776 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
778 ret
= spi_nor_controller_ops_write_reg(nor
, SPINOR_OP_GBULK
,
783 dev_dbg(nor
->dev
, "error %d on Global Block Unlock\n", ret
);
787 return spi_nor_wait_till_ready(nor
);
791 * spi_nor_write_sr() - Write the Status Register.
792 * @nor: pointer to 'struct spi_nor'.
793 * @sr: pointer to DMA-able buffer to write to the Status Register.
794 * @len: number of bytes to write to the Status Register.
796 * Return: 0 on success, -errno otherwise.
798 int spi_nor_write_sr(struct spi_nor
*nor
, const u8
*sr
, size_t len
)
802 ret
= spi_nor_write_enable(nor
);
807 struct spi_mem_op op
= SPI_NOR_WRSR_OP(sr
, len
);
809 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
811 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
813 ret
= spi_nor_controller_ops_write_reg(nor
, SPINOR_OP_WRSR
, sr
,
818 dev_dbg(nor
->dev
, "error %d writing SR\n", ret
);
822 return spi_nor_wait_till_ready(nor
);
826 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
827 * ensure that the byte written match the received value.
828 * @nor: pointer to a 'struct spi_nor'.
829 * @sr1: byte value to be written to the Status Register.
831 * Return: 0 on success, -errno otherwise.
833 static int spi_nor_write_sr1_and_check(struct spi_nor
*nor
, u8 sr1
)
837 nor
->bouncebuf
[0] = sr1
;
839 ret
= spi_nor_write_sr(nor
, nor
->bouncebuf
, 1);
843 ret
= spi_nor_read_sr(nor
, nor
->bouncebuf
);
847 if (nor
->bouncebuf
[0] != sr1
) {
848 dev_dbg(nor
->dev
, "SR1: read back test failed\n");
856 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
857 * Status Register 2 in one shot. Ensure that the byte written in the Status
858 * Register 1 match the received value, and that the 16-bit Write did not
859 * affect what was already in the Status Register 2.
860 * @nor: pointer to a 'struct spi_nor'.
861 * @sr1: byte value to be written to the Status Register 1.
863 * Return: 0 on success, -errno otherwise.
865 static int spi_nor_write_16bit_sr_and_check(struct spi_nor
*nor
, u8 sr1
)
868 u8
*sr_cr
= nor
->bouncebuf
;
871 /* Make sure we don't overwrite the contents of Status Register 2. */
872 if (!(nor
->flags
& SNOR_F_NO_READ_CR
)) {
873 ret
= spi_nor_read_cr(nor
, &sr_cr
[1]);
876 } else if (spi_nor_get_protocol_width(nor
->read_proto
) == 4 &&
877 spi_nor_get_protocol_width(nor
->write_proto
) == 4 &&
878 nor
->params
->quad_enable
) {
880 * If the Status Register 2 Read command (35h) is not
881 * supported, we should at least be sure we don't
882 * change the value of the SR2 Quad Enable bit.
884 * When the Quad Enable method is set and the buswidth is 4, we
885 * can safely assume that the value of the QE bit is one, as a
886 * consequence of the nor->params->quad_enable() call.
888 * According to the JESD216 revB standard, BFPT DWORDS[15],
889 * bits 22:20, the 16-bit Write Status (01h) command is
890 * available just for the cases in which the QE bit is
891 * described in SR2 at BIT(1).
893 sr_cr
[1] = SR2_QUAD_EN_BIT1
;
900 ret
= spi_nor_write_sr(nor
, sr_cr
, 2);
904 ret
= spi_nor_read_sr(nor
, sr_cr
);
908 if (sr1
!= sr_cr
[0]) {
909 dev_dbg(nor
->dev
, "SR: Read back test failed\n");
913 if (nor
->flags
& SNOR_F_NO_READ_CR
)
916 cr_written
= sr_cr
[1];
918 ret
= spi_nor_read_cr(nor
, &sr_cr
[1]);
922 if (cr_written
!= sr_cr
[1]) {
923 dev_dbg(nor
->dev
, "CR: read back test failed\n");
931 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
932 * Configuration Register in one shot. Ensure that the byte written in the
933 * Configuration Register match the received value, and that the 16-bit Write
934 * did not affect what was already in the Status Register 1.
935 * @nor: pointer to a 'struct spi_nor'.
936 * @cr: byte value to be written to the Configuration Register.
938 * Return: 0 on success, -errno otherwise.
940 int spi_nor_write_16bit_cr_and_check(struct spi_nor
*nor
, u8 cr
)
943 u8
*sr_cr
= nor
->bouncebuf
;
946 /* Keep the current value of the Status Register 1. */
947 ret
= spi_nor_read_sr(nor
, sr_cr
);
953 ret
= spi_nor_write_sr(nor
, sr_cr
, 2);
957 sr_written
= sr_cr
[0];
959 ret
= spi_nor_read_sr(nor
, sr_cr
);
963 if (sr_written
!= sr_cr
[0]) {
964 dev_dbg(nor
->dev
, "SR: Read back test failed\n");
968 if (nor
->flags
& SNOR_F_NO_READ_CR
)
971 ret
= spi_nor_read_cr(nor
, &sr_cr
[1]);
975 if (cr
!= sr_cr
[1]) {
976 dev_dbg(nor
->dev
, "CR: read back test failed\n");
984 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
985 * the byte written match the received value without affecting other bits in the
986 * Status Register 1 and 2.
987 * @nor: pointer to a 'struct spi_nor'.
988 * @sr1: byte value to be written to the Status Register.
990 * Return: 0 on success, -errno otherwise.
992 int spi_nor_write_sr_and_check(struct spi_nor
*nor
, u8 sr1
)
994 if (nor
->flags
& SNOR_F_HAS_16BIT_SR
)
995 return spi_nor_write_16bit_sr_and_check(nor
, sr1
);
997 return spi_nor_write_sr1_and_check(nor
, sr1
);
1001 * spi_nor_write_sr2() - Write the Status Register 2 using the
1002 * SPINOR_OP_WRSR2 (3eh) command.
1003 * @nor: pointer to 'struct spi_nor'.
1004 * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
1006 * Return: 0 on success, -errno otherwise.
1008 static int spi_nor_write_sr2(struct spi_nor
*nor
, const u8
*sr2
)
1012 ret
= spi_nor_write_enable(nor
);
1017 struct spi_mem_op op
= SPI_NOR_WRSR2_OP(sr2
);
1019 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
1021 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
1023 ret
= spi_nor_controller_ops_write_reg(nor
, SPINOR_OP_WRSR2
,
1028 dev_dbg(nor
->dev
, "error %d writing SR2\n", ret
);
1032 return spi_nor_wait_till_ready(nor
);
1036 * spi_nor_read_sr2() - Read the Status Register 2 using the
1037 * SPINOR_OP_RDSR2 (3fh) command.
1038 * @nor: pointer to 'struct spi_nor'.
1039 * @sr2: pointer to DMA-able buffer where the value of the
1040 * Status Register 2 will be written.
1042 * Return: 0 on success, -errno otherwise.
1044 static int spi_nor_read_sr2(struct spi_nor
*nor
, u8
*sr2
)
1049 struct spi_mem_op op
= SPI_NOR_RDSR2_OP(sr2
);
1051 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
1053 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
1055 ret
= spi_nor_controller_ops_read_reg(nor
, SPINOR_OP_RDSR2
, sr2
,
1060 dev_dbg(nor
->dev
, "error %d reading SR2\n", ret
);
1066 * spi_nor_erase_die() - Erase the entire die.
1067 * @nor: pointer to 'struct spi_nor'.
1068 * @addr: address of the die.
1069 * @die_size: size of the die.
1071 * Return: 0 on success, -errno otherwise.
1073 static int spi_nor_erase_die(struct spi_nor
*nor
, loff_t addr
, size_t die_size
)
1075 bool multi_die
= nor
->mtd
.size
!= die_size
;
1078 dev_dbg(nor
->dev
, " %lldKiB\n", (long long)(die_size
>> 10));
1081 struct spi_mem_op op
=
1082 SPI_NOR_DIE_ERASE_OP(nor
->params
->die_erase_opcode
,
1083 nor
->addr_nbytes
, addr
, multi_die
);
1085 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
1087 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
1092 ret
= spi_nor_controller_ops_write_reg(nor
,
1093 SPINOR_OP_CHIP_ERASE
,
1098 dev_dbg(nor
->dev
, "error %d erasing chip\n", ret
);
1103 static u8
spi_nor_convert_opcode(u8 opcode
, const u8 table
[][2], size_t size
)
1107 for (i
= 0; i
< size
; i
++)
1108 if (table
[i
][0] == opcode
)
1111 /* No conversion found, keep input op code. */
1115 u8
spi_nor_convert_3to4_read(u8 opcode
)
1117 static const u8 spi_nor_3to4_read
[][2] = {
1118 { SPINOR_OP_READ
, SPINOR_OP_READ_4B
},
1119 { SPINOR_OP_READ_FAST
, SPINOR_OP_READ_FAST_4B
},
1120 { SPINOR_OP_READ_1_1_2
, SPINOR_OP_READ_1_1_2_4B
},
1121 { SPINOR_OP_READ_1_2_2
, SPINOR_OP_READ_1_2_2_4B
},
1122 { SPINOR_OP_READ_1_1_4
, SPINOR_OP_READ_1_1_4_4B
},
1123 { SPINOR_OP_READ_1_4_4
, SPINOR_OP_READ_1_4_4_4B
},
1124 { SPINOR_OP_READ_1_1_8
, SPINOR_OP_READ_1_1_8_4B
},
1125 { SPINOR_OP_READ_1_8_8
, SPINOR_OP_READ_1_8_8_4B
},
1127 { SPINOR_OP_READ_1_1_1_DTR
, SPINOR_OP_READ_1_1_1_DTR_4B
},
1128 { SPINOR_OP_READ_1_2_2_DTR
, SPINOR_OP_READ_1_2_2_DTR_4B
},
1129 { SPINOR_OP_READ_1_4_4_DTR
, SPINOR_OP_READ_1_4_4_DTR_4B
},
1132 return spi_nor_convert_opcode(opcode
, spi_nor_3to4_read
,
1133 ARRAY_SIZE(spi_nor_3to4_read
));
1136 static u8
spi_nor_convert_3to4_program(u8 opcode
)
1138 static const u8 spi_nor_3to4_program
[][2] = {
1139 { SPINOR_OP_PP
, SPINOR_OP_PP_4B
},
1140 { SPINOR_OP_PP_1_1_4
, SPINOR_OP_PP_1_1_4_4B
},
1141 { SPINOR_OP_PP_1_4_4
, SPINOR_OP_PP_1_4_4_4B
},
1142 { SPINOR_OP_PP_1_1_8
, SPINOR_OP_PP_1_1_8_4B
},
1143 { SPINOR_OP_PP_1_8_8
, SPINOR_OP_PP_1_8_8_4B
},
1146 return spi_nor_convert_opcode(opcode
, spi_nor_3to4_program
,
1147 ARRAY_SIZE(spi_nor_3to4_program
));
1150 static u8
spi_nor_convert_3to4_erase(u8 opcode
)
1152 static const u8 spi_nor_3to4_erase
[][2] = {
1153 { SPINOR_OP_BE_4K
, SPINOR_OP_BE_4K_4B
},
1154 { SPINOR_OP_BE_32K
, SPINOR_OP_BE_32K_4B
},
1155 { SPINOR_OP_SE
, SPINOR_OP_SE_4B
},
1158 return spi_nor_convert_opcode(opcode
, spi_nor_3to4_erase
,
1159 ARRAY_SIZE(spi_nor_3to4_erase
));
1162 static bool spi_nor_has_uniform_erase(const struct spi_nor
*nor
)
1164 return !!nor
->params
->erase_map
.uniform_region
.erase_mask
;
1167 static void spi_nor_set_4byte_opcodes(struct spi_nor
*nor
)
1169 nor
->read_opcode
= spi_nor_convert_3to4_read(nor
->read_opcode
);
1170 nor
->program_opcode
= spi_nor_convert_3to4_program(nor
->program_opcode
);
1171 nor
->erase_opcode
= spi_nor_convert_3to4_erase(nor
->erase_opcode
);
1173 if (!spi_nor_has_uniform_erase(nor
)) {
1174 struct spi_nor_erase_map
*map
= &nor
->params
->erase_map
;
1175 struct spi_nor_erase_type
*erase
;
1178 for (i
= 0; i
< SNOR_ERASE_TYPE_MAX
; i
++) {
1179 erase
= &map
->erase_type
[i
];
1181 spi_nor_convert_3to4_erase(erase
->opcode
);
1186 static int spi_nor_prep(struct spi_nor
*nor
)
1190 if (nor
->controller_ops
&& nor
->controller_ops
->prepare
)
1191 ret
= nor
->controller_ops
->prepare(nor
);
1196 static void spi_nor_unprep(struct spi_nor
*nor
)
1198 if (nor
->controller_ops
&& nor
->controller_ops
->unprepare
)
1199 nor
->controller_ops
->unprepare(nor
);
1202 static void spi_nor_offset_to_banks(u64 bank_size
, loff_t start
, size_t len
,
1203 u8
*first
, u8
*last
)
1205 /* This is currently safe, the number of banks being very small */
1206 *first
= DIV_ROUND_DOWN_ULL(start
, bank_size
);
1207 *last
= DIV_ROUND_DOWN_ULL(start
+ len
- 1, bank_size
);
1210 /* Generic helpers for internal locking and serialization */
1211 static bool spi_nor_rww_start_io(struct spi_nor
*nor
)
1213 struct spi_nor_rww
*rww
= &nor
->rww
;
1216 mutex_lock(&nor
->lock
);
1218 if (rww
->ongoing_io
)
1221 rww
->ongoing_io
= true;
1225 mutex_unlock(&nor
->lock
);
1229 static void spi_nor_rww_end_io(struct spi_nor
*nor
)
1231 mutex_lock(&nor
->lock
);
1232 nor
->rww
.ongoing_io
= false;
1233 mutex_unlock(&nor
->lock
);
1236 static int spi_nor_lock_device(struct spi_nor
*nor
)
1238 if (!spi_nor_use_parallel_locking(nor
))
1241 return wait_event_killable(nor
->rww
.wait
, spi_nor_rww_start_io(nor
));
1244 static void spi_nor_unlock_device(struct spi_nor
*nor
)
1246 if (spi_nor_use_parallel_locking(nor
)) {
1247 spi_nor_rww_end_io(nor
);
1248 wake_up(&nor
->rww
.wait
);
1252 /* Generic helpers for internal locking and serialization */
1253 static bool spi_nor_rww_start_exclusive(struct spi_nor
*nor
)
1255 struct spi_nor_rww
*rww
= &nor
->rww
;
1258 mutex_lock(&nor
->lock
);
1260 if (rww
->ongoing_io
|| rww
->ongoing_rd
|| rww
->ongoing_pe
)
1263 rww
->ongoing_io
= true;
1264 rww
->ongoing_rd
= true;
1265 rww
->ongoing_pe
= true;
1269 mutex_unlock(&nor
->lock
);
1273 static void spi_nor_rww_end_exclusive(struct spi_nor
*nor
)
1275 struct spi_nor_rww
*rww
= &nor
->rww
;
1277 mutex_lock(&nor
->lock
);
1278 rww
->ongoing_io
= false;
1279 rww
->ongoing_rd
= false;
1280 rww
->ongoing_pe
= false;
1281 mutex_unlock(&nor
->lock
);
1284 int spi_nor_prep_and_lock(struct spi_nor
*nor
)
1288 ret
= spi_nor_prep(nor
);
1292 if (!spi_nor_use_parallel_locking(nor
))
1293 mutex_lock(&nor
->lock
);
1295 ret
= wait_event_killable(nor
->rww
.wait
,
1296 spi_nor_rww_start_exclusive(nor
));
1301 void spi_nor_unlock_and_unprep(struct spi_nor
*nor
)
1303 if (!spi_nor_use_parallel_locking(nor
)) {
1304 mutex_unlock(&nor
->lock
);
1306 spi_nor_rww_end_exclusive(nor
);
1307 wake_up(&nor
->rww
.wait
);
1310 spi_nor_unprep(nor
);
1313 /* Internal locking helpers for program and erase operations */
1314 static bool spi_nor_rww_start_pe(struct spi_nor
*nor
, loff_t start
, size_t len
)
1316 struct spi_nor_rww
*rww
= &nor
->rww
;
1317 unsigned int used_banks
= 0;
1318 bool started
= false;
1322 mutex_lock(&nor
->lock
);
1324 if (rww
->ongoing_io
|| rww
->ongoing_rd
|| rww
->ongoing_pe
)
1327 spi_nor_offset_to_banks(nor
->params
->bank_size
, start
, len
, &first
, &last
);
1328 for (bank
= first
; bank
<= last
; bank
++) {
1329 if (rww
->used_banks
& BIT(bank
))
1332 used_banks
|= BIT(bank
);
1335 rww
->used_banks
|= used_banks
;
1336 rww
->ongoing_pe
= true;
1340 mutex_unlock(&nor
->lock
);
1344 static void spi_nor_rww_end_pe(struct spi_nor
*nor
, loff_t start
, size_t len
)
1346 struct spi_nor_rww
*rww
= &nor
->rww
;
1350 mutex_lock(&nor
->lock
);
1352 spi_nor_offset_to_banks(nor
->params
->bank_size
, start
, len
, &first
, &last
);
1353 for (bank
= first
; bank
<= last
; bank
++)
1354 rww
->used_banks
&= ~BIT(bank
);
1356 rww
->ongoing_pe
= false;
1358 mutex_unlock(&nor
->lock
);
1361 static int spi_nor_prep_and_lock_pe(struct spi_nor
*nor
, loff_t start
, size_t len
)
1365 ret
= spi_nor_prep(nor
);
1369 if (!spi_nor_use_parallel_locking(nor
))
1370 mutex_lock(&nor
->lock
);
1372 ret
= wait_event_killable(nor
->rww
.wait
,
1373 spi_nor_rww_start_pe(nor
, start
, len
));
1378 static void spi_nor_unlock_and_unprep_pe(struct spi_nor
*nor
, loff_t start
, size_t len
)
1380 if (!spi_nor_use_parallel_locking(nor
)) {
1381 mutex_unlock(&nor
->lock
);
1383 spi_nor_rww_end_pe(nor
, start
, len
);
1384 wake_up(&nor
->rww
.wait
);
1387 spi_nor_unprep(nor
);
1390 /* Internal locking helpers for read operations */
1391 static bool spi_nor_rww_start_rd(struct spi_nor
*nor
, loff_t start
, size_t len
)
1393 struct spi_nor_rww
*rww
= &nor
->rww
;
1394 unsigned int used_banks
= 0;
1395 bool started
= false;
1399 mutex_lock(&nor
->lock
);
1401 if (rww
->ongoing_io
|| rww
->ongoing_rd
)
1404 spi_nor_offset_to_banks(nor
->params
->bank_size
, start
, len
, &first
, &last
);
1405 for (bank
= first
; bank
<= last
; bank
++) {
1406 if (rww
->used_banks
& BIT(bank
))
1409 used_banks
|= BIT(bank
);
1412 rww
->used_banks
|= used_banks
;
1413 rww
->ongoing_io
= true;
1414 rww
->ongoing_rd
= true;
1418 mutex_unlock(&nor
->lock
);
1422 static void spi_nor_rww_end_rd(struct spi_nor
*nor
, loff_t start
, size_t len
)
1424 struct spi_nor_rww
*rww
= &nor
->rww
;
1428 mutex_lock(&nor
->lock
);
1430 spi_nor_offset_to_banks(nor
->params
->bank_size
, start
, len
, &first
, &last
);
1431 for (bank
= first
; bank
<= last
; bank
++)
1432 nor
->rww
.used_banks
&= ~BIT(bank
);
1434 rww
->ongoing_io
= false;
1435 rww
->ongoing_rd
= false;
1437 mutex_unlock(&nor
->lock
);
1440 static int spi_nor_prep_and_lock_rd(struct spi_nor
*nor
, loff_t start
, size_t len
)
1444 ret
= spi_nor_prep(nor
);
1448 if (!spi_nor_use_parallel_locking(nor
))
1449 mutex_lock(&nor
->lock
);
1451 ret
= wait_event_killable(nor
->rww
.wait
,
1452 spi_nor_rww_start_rd(nor
, start
, len
));
1457 static void spi_nor_unlock_and_unprep_rd(struct spi_nor
*nor
, loff_t start
, size_t len
)
1459 if (!spi_nor_use_parallel_locking(nor
)) {
1460 mutex_unlock(&nor
->lock
);
1462 spi_nor_rww_end_rd(nor
, start
, len
);
1463 wake_up(&nor
->rww
.wait
);
1466 spi_nor_unprep(nor
);
1470 * Initiate the erasure of a single sector
1472 int spi_nor_erase_sector(struct spi_nor
*nor
, u32 addr
)
1477 struct spi_mem_op op
=
1478 SPI_NOR_SECTOR_ERASE_OP(nor
->erase_opcode
,
1479 nor
->addr_nbytes
, addr
);
1481 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
1483 return spi_mem_exec_op(nor
->spimem
, &op
);
1484 } else if (nor
->controller_ops
->erase
) {
1485 return spi_nor_controller_ops_erase(nor
, addr
);
1489 * Default implementation, if driver doesn't have a specialized HW
1492 for (i
= nor
->addr_nbytes
- 1; i
>= 0; i
--) {
1493 nor
->bouncebuf
[i
] = addr
& 0xff;
1497 return spi_nor_controller_ops_write_reg(nor
, nor
->erase_opcode
,
1498 nor
->bouncebuf
, nor
->addr_nbytes
);
1502 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
1503 * @erase: pointer to a structure that describes a SPI NOR erase type
1504 * @dividend: dividend value
1505 * @remainder: pointer to u32 remainder (will be updated)
1507 * Return: the result of the division
1509 static u64
spi_nor_div_by_erase_size(const struct spi_nor_erase_type
*erase
,
1510 u64 dividend
, u32
*remainder
)
1512 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
1513 *remainder
= (u32
)dividend
& erase
->size_mask
;
1514 return dividend
>> erase
->size_shift
;
1518 * spi_nor_find_best_erase_type() - find the best erase type for the given
1519 * offset in the serial flash memory and the
1520 * number of bytes to erase. The region in
1521 * which the address fits is expected to be
1523 * @map: the erase map of the SPI NOR
1524 * @region: pointer to a structure that describes a SPI NOR erase region
1525 * @addr: offset in the serial flash memory
1526 * @len: number of bytes to erase
1528 * Return: a pointer to the best fitted erase type, NULL otherwise.
1530 static const struct spi_nor_erase_type
*
1531 spi_nor_find_best_erase_type(const struct spi_nor_erase_map
*map
,
1532 const struct spi_nor_erase_region
*region
,
1535 const struct spi_nor_erase_type
*erase
;
1540 * Erase types are ordered by size, with the smallest erase type at
1543 for (i
= SNOR_ERASE_TYPE_MAX
- 1; i
>= 0; i
--) {
1544 /* Does the erase region support the tested erase type? */
1545 if (!(region
->erase_mask
& BIT(i
)))
1548 erase
= &map
->erase_type
[i
];
1552 /* Alignment is not mandatory for overlaid regions */
1553 if (region
->overlaid
&& region
->size
<= len
)
1556 /* Don't erase more than what the user has asked for. */
1557 if (erase
->size
> len
)
1560 spi_nor_div_by_erase_size(erase
, addr
, &rem
);
1569 * spi_nor_init_erase_cmd() - initialize an erase command
1570 * @region: pointer to a structure that describes a SPI NOR erase region
1571 * @erase: pointer to a structure that describes a SPI NOR erase type
1573 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1576 static struct spi_nor_erase_command
*
1577 spi_nor_init_erase_cmd(const struct spi_nor_erase_region
*region
,
1578 const struct spi_nor_erase_type
*erase
)
1580 struct spi_nor_erase_command
*cmd
;
1582 cmd
= kmalloc(sizeof(*cmd
), GFP_KERNEL
);
1584 return ERR_PTR(-ENOMEM
);
1586 INIT_LIST_HEAD(&cmd
->list
);
1587 cmd
->opcode
= erase
->opcode
;
1590 if (region
->overlaid
)
1591 cmd
->size
= region
->size
;
1593 cmd
->size
= erase
->size
;
1599 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1600 * @erase_list: list of erase commands
1602 static void spi_nor_destroy_erase_cmd_list(struct list_head
*erase_list
)
1604 struct spi_nor_erase_command
*cmd
, *next
;
1606 list_for_each_entry_safe(cmd
, next
, erase_list
, list
) {
1607 list_del(&cmd
->list
);
1613 * spi_nor_init_erase_cmd_list() - initialize erase command list
1614 * @nor: pointer to a 'struct spi_nor'
1615 * @erase_list: list of erase commands to be executed once we validate that the
1616 * erase can be performed
1617 * @addr: offset in the serial flash memory
1618 * @len: number of bytes to erase
1620 * Builds the list of best fitted erase commands and verifies if the erase can
1623 * Return: 0 on success, -errno otherwise.
1625 static int spi_nor_init_erase_cmd_list(struct spi_nor
*nor
,
1626 struct list_head
*erase_list
,
1629 const struct spi_nor_erase_map
*map
= &nor
->params
->erase_map
;
1630 const struct spi_nor_erase_type
*erase
, *prev_erase
= NULL
;
1631 struct spi_nor_erase_region
*region
;
1632 struct spi_nor_erase_command
*cmd
= NULL
;
1637 for (i
= 0; i
< map
->n_regions
&& len
; i
++) {
1638 region
= &map
->regions
[i
];
1639 region_end
= region
->offset
+ region
->size
;
1641 while (len
&& addr
>= region
->offset
&& addr
< region_end
) {
1642 erase
= spi_nor_find_best_erase_type(map
, region
, addr
,
1645 goto destroy_erase_cmd_list
;
1647 if (prev_erase
!= erase
|| erase
->size
!= cmd
->size
||
1649 cmd
= spi_nor_init_erase_cmd(region
, erase
);
1652 goto destroy_erase_cmd_list
;
1655 list_add_tail(&cmd
->list
, erase_list
);
1668 destroy_erase_cmd_list
:
1669 spi_nor_destroy_erase_cmd_list(erase_list
);
1674 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1675 * @nor: pointer to a 'struct spi_nor'
1676 * @addr: offset in the serial flash memory
1677 * @len: number of bytes to erase
1679 * Build a list of best fitted erase commands and execute it once we validate
1680 * that the erase can be performed.
1682 * Return: 0 on success, -errno otherwise.
1684 static int spi_nor_erase_multi_sectors(struct spi_nor
*nor
, u64 addr
, u32 len
)
1686 LIST_HEAD(erase_list
);
1687 struct spi_nor_erase_command
*cmd
, *next
;
1690 ret
= spi_nor_init_erase_cmd_list(nor
, &erase_list
, addr
, len
);
1694 list_for_each_entry_safe(cmd
, next
, &erase_list
, list
) {
1695 nor
->erase_opcode
= cmd
->opcode
;
1696 while (cmd
->count
) {
1697 dev_vdbg(nor
->dev
, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
1698 cmd
->size
, cmd
->opcode
, cmd
->count
);
1700 ret
= spi_nor_lock_device(nor
);
1702 goto destroy_erase_cmd_list
;
1704 ret
= spi_nor_write_enable(nor
);
1706 spi_nor_unlock_device(nor
);
1707 goto destroy_erase_cmd_list
;
1710 ret
= spi_nor_erase_sector(nor
, addr
);
1711 spi_nor_unlock_device(nor
);
1713 goto destroy_erase_cmd_list
;
1715 ret
= spi_nor_wait_till_ready(nor
);
1717 goto destroy_erase_cmd_list
;
1722 list_del(&cmd
->list
);
1728 destroy_erase_cmd_list
:
1729 spi_nor_destroy_erase_cmd_list(&erase_list
);
1733 static int spi_nor_erase_dice(struct spi_nor
*nor
, loff_t addr
,
1734 size_t len
, size_t die_size
)
1736 unsigned long timeout
;
1740 * Scale the timeout linearly with the size of the flash, with
1741 * a minimum calibrated to an old 2MB flash. We could try to
1742 * pull these from CFI/SFDP, but these values should be good
1745 timeout
= max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES
,
1746 CHIP_ERASE_2MB_READY_WAIT_JIFFIES
*
1747 (unsigned long)(nor
->mtd
.size
/ SZ_2M
));
1750 ret
= spi_nor_lock_device(nor
);
1754 ret
= spi_nor_write_enable(nor
);
1756 spi_nor_unlock_device(nor
);
1760 ret
= spi_nor_erase_die(nor
, addr
, die_size
);
1762 spi_nor_unlock_device(nor
);
1766 ret
= spi_nor_wait_till_ready_with_timeout(nor
, timeout
);
1779 * Erase an address range on the nor chip. The address range may extend
1780 * one or more erase sectors. Return an error if there is a problem erasing.
1782 static int spi_nor_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
1784 struct spi_nor
*nor
= mtd_to_spi_nor(mtd
);
1785 u8 n_dice
= nor
->params
->n_dice
;
1786 bool multi_die_erase
= false;
1791 dev_dbg(nor
->dev
, "at 0x%llx, len %lld\n", (long long)instr
->addr
,
1792 (long long)instr
->len
);
1794 if (spi_nor_has_uniform_erase(nor
)) {
1795 div_u64_rem(instr
->len
, mtd
->erasesize
, &rem
);
1804 die_size
= div_u64(mtd
->size
, n_dice
);
1805 if (!(len
& (die_size
- 1)) && !(addr
& (die_size
- 1)))
1806 multi_die_erase
= true;
1808 die_size
= mtd
->size
;
1811 ret
= spi_nor_prep_and_lock_pe(nor
, instr
->addr
, instr
->len
);
1815 /* chip (die) erase? */
1816 if ((len
== mtd
->size
&& !(nor
->flags
& SNOR_F_NO_OP_CHIP_ERASE
)) ||
1818 ret
= spi_nor_erase_dice(nor
, addr
, len
, die_size
);
1822 /* REVISIT in some cases we could speed up erasing large regions
1823 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
1824 * to use "small sector erase", but that's not always optimal.
1827 /* "sector"-at-a-time erase */
1828 } else if (spi_nor_has_uniform_erase(nor
)) {
1830 ret
= spi_nor_lock_device(nor
);
1834 ret
= spi_nor_write_enable(nor
);
1836 spi_nor_unlock_device(nor
);
1840 ret
= spi_nor_erase_sector(nor
, addr
);
1841 spi_nor_unlock_device(nor
);
1845 ret
= spi_nor_wait_till_ready(nor
);
1849 addr
+= mtd
->erasesize
;
1850 len
-= mtd
->erasesize
;
1853 /* erase multiple sectors */
1855 ret
= spi_nor_erase_multi_sectors(nor
, addr
, len
);
1860 ret
= spi_nor_write_disable(nor
);
1863 spi_nor_unlock_and_unprep_pe(nor
, instr
->addr
, instr
->len
);
1869 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
1871 * @nor: pointer to a 'struct spi_nor'
1873 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
1875 * Return: 0 on success, -errno otherwise.
1877 int spi_nor_sr1_bit6_quad_enable(struct spi_nor
*nor
)
1881 ret
= spi_nor_read_sr(nor
, nor
->bouncebuf
);
1885 if (nor
->bouncebuf
[0] & SR1_QUAD_EN_BIT6
)
1888 nor
->bouncebuf
[0] |= SR1_QUAD_EN_BIT6
;
1890 return spi_nor_write_sr1_and_check(nor
, nor
->bouncebuf
[0]);
1894 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
1896 * @nor: pointer to a 'struct spi_nor'.
1898 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
1900 * Return: 0 on success, -errno otherwise.
1902 int spi_nor_sr2_bit1_quad_enable(struct spi_nor
*nor
)
1906 if (nor
->flags
& SNOR_F_NO_READ_CR
)
1907 return spi_nor_write_16bit_cr_and_check(nor
, SR2_QUAD_EN_BIT1
);
1909 ret
= spi_nor_read_cr(nor
, nor
->bouncebuf
);
1913 if (nor
->bouncebuf
[0] & SR2_QUAD_EN_BIT1
)
1916 nor
->bouncebuf
[0] |= SR2_QUAD_EN_BIT1
;
1918 return spi_nor_write_16bit_cr_and_check(nor
, nor
->bouncebuf
[0]);
1922 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1923 * @nor: pointer to a 'struct spi_nor'
1925 * Set the Quad Enable (QE) bit in the Status Register 2.
1927 * This is one of the procedures to set the QE bit described in the SFDP
1928 * (JESD216 rev B) specification but no manufacturer using this procedure has
1929 * been identified yet, hence the name of the function.
1931 * Return: 0 on success, -errno otherwise.
1933 int spi_nor_sr2_bit7_quad_enable(struct spi_nor
*nor
)
1935 u8
*sr2
= nor
->bouncebuf
;
1939 /* Check current Quad Enable bit value. */
1940 ret
= spi_nor_read_sr2(nor
, sr2
);
1943 if (*sr2
& SR2_QUAD_EN_BIT7
)
1946 /* Update the Quad Enable bit. */
1947 *sr2
|= SR2_QUAD_EN_BIT7
;
1949 ret
= spi_nor_write_sr2(nor
, sr2
);
1955 /* Read back and check it. */
1956 ret
= spi_nor_read_sr2(nor
, sr2
);
1960 if (*sr2
!= sr2_written
) {
1961 dev_dbg(nor
->dev
, "SR2: Read back test failed\n");
1968 static const struct spi_nor_manufacturer
*manufacturers
[] = {
1973 &spi_nor_gigadevice
,
1985 static const struct flash_info spi_nor_generic_flash
= {
1986 .name
= "spi-nor-generic",
1989 static const struct flash_info
*spi_nor_match_id(struct spi_nor
*nor
,
1992 const struct flash_info
*part
;
1995 for (i
= 0; i
< ARRAY_SIZE(manufacturers
); i
++) {
1996 for (j
= 0; j
< manufacturers
[i
]->nparts
; j
++) {
1997 part
= &manufacturers
[i
]->parts
[j
];
1999 !memcmp(part
->id
->bytes
, id
, part
->id
->len
)) {
2000 nor
->manufacturer
= manufacturers
[i
];
2009 static const struct flash_info
*spi_nor_detect(struct spi_nor
*nor
)
2011 const struct flash_info
*info
;
2012 u8
*id
= nor
->bouncebuf
;
2015 ret
= spi_nor_read_id(nor
, 0, 0, id
, nor
->reg_proto
);
2017 dev_dbg(nor
->dev
, "error %d reading JEDEC ID\n", ret
);
2018 return ERR_PTR(ret
);
2021 /* Cache the complete flash ID. */
2022 nor
->id
= devm_kmemdup(nor
->dev
, id
, SPI_NOR_MAX_ID_LEN
, GFP_KERNEL
);
2024 return ERR_PTR(-ENOMEM
);
2026 info
= spi_nor_match_id(nor
, id
);
2028 /* Fallback to a generic flash described only by its SFDP data. */
2030 ret
= spi_nor_check_sfdp_signature(nor
);
2032 info
= &spi_nor_generic_flash
;
2036 dev_err(nor
->dev
, "unrecognized JEDEC id bytes: %*ph\n",
2037 SPI_NOR_MAX_ID_LEN
, id
);
2038 return ERR_PTR(-ENODEV
);
2043 static int spi_nor_read(struct mtd_info
*mtd
, loff_t from
, size_t len
,
2044 size_t *retlen
, u_char
*buf
)
2046 struct spi_nor
*nor
= mtd_to_spi_nor(mtd
);
2047 loff_t from_lock
= from
;
2048 size_t len_lock
= len
;
2051 dev_dbg(nor
->dev
, "from 0x%08x, len %zd\n", (u32
)from
, len
);
2053 ret
= spi_nor_prep_and_lock_rd(nor
, from_lock
, len_lock
);
2060 ret
= spi_nor_read_data(nor
, addr
, len
, buf
);
2062 /* We shouldn't see 0-length reads */
2078 spi_nor_unlock_and_unprep_rd(nor
, from_lock
, len_lock
);
2084 * Write an address range to the nor chip. Data must be written in
2085 * FLASH_PAGESIZE chunks. The address range may be any size provided
2086 * it is within the physical boundaries.
2088 static int spi_nor_write(struct mtd_info
*mtd
, loff_t to
, size_t len
,
2089 size_t *retlen
, const u_char
*buf
)
2091 struct spi_nor
*nor
= mtd_to_spi_nor(mtd
);
2094 u32 page_size
= nor
->params
->page_size
;
2096 dev_dbg(nor
->dev
, "to 0x%08x, len %zd\n", (u32
)to
, len
);
2098 ret
= spi_nor_prep_and_lock_pe(nor
, to
, len
);
2102 for (i
= 0; i
< len
; ) {
2104 loff_t addr
= to
+ i
;
2105 size_t page_offset
= addr
& (page_size
- 1);
2106 /* the size of data remaining on the first page */
2107 size_t page_remain
= min_t(size_t, page_size
- page_offset
, len
- i
);
2109 ret
= spi_nor_lock_device(nor
);
2113 ret
= spi_nor_write_enable(nor
);
2115 spi_nor_unlock_device(nor
);
2119 ret
= spi_nor_write_data(nor
, addr
, page_remain
, buf
+ i
);
2120 spi_nor_unlock_device(nor
);
2125 ret
= spi_nor_wait_till_ready(nor
);
2133 spi_nor_unlock_and_unprep_pe(nor
, to
, len
);
2138 static int spi_nor_check(struct spi_nor
*nor
)
2141 (!nor
->spimem
&& !nor
->controller_ops
) ||
2142 (!nor
->spimem
&& nor
->controller_ops
&&
2143 (!nor
->controller_ops
->read
||
2144 !nor
->controller_ops
->write
||
2145 !nor
->controller_ops
->read_reg
||
2146 !nor
->controller_ops
->write_reg
))) {
2147 pr_err("spi-nor: please fill all the necessary fields!\n");
2151 if (nor
->spimem
&& nor
->controller_ops
) {
2152 dev_err(nor
->dev
, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2160 spi_nor_set_read_settings(struct spi_nor_read_command
*read
,
2164 enum spi_nor_protocol proto
)
2166 read
->num_mode_clocks
= num_mode_clocks
;
2167 read
->num_wait_states
= num_wait_states
;
2168 read
->opcode
= opcode
;
2169 read
->proto
= proto
;
2172 void spi_nor_set_pp_settings(struct spi_nor_pp_command
*pp
, u8 opcode
,
2173 enum spi_nor_protocol proto
)
2175 pp
->opcode
= opcode
;
2179 static int spi_nor_hwcaps2cmd(u32 hwcaps
, const int table
[][2], size_t size
)
2183 for (i
= 0; i
< size
; i
++)
2184 if (table
[i
][0] == (int)hwcaps
)
2190 int spi_nor_hwcaps_read2cmd(u32 hwcaps
)
2192 static const int hwcaps_read2cmd
[][2] = {
2193 { SNOR_HWCAPS_READ
, SNOR_CMD_READ
},
2194 { SNOR_HWCAPS_READ_FAST
, SNOR_CMD_READ_FAST
},
2195 { SNOR_HWCAPS_READ_1_1_1_DTR
, SNOR_CMD_READ_1_1_1_DTR
},
2196 { SNOR_HWCAPS_READ_1_1_2
, SNOR_CMD_READ_1_1_2
},
2197 { SNOR_HWCAPS_READ_1_2_2
, SNOR_CMD_READ_1_2_2
},
2198 { SNOR_HWCAPS_READ_2_2_2
, SNOR_CMD_READ_2_2_2
},
2199 { SNOR_HWCAPS_READ_1_2_2_DTR
, SNOR_CMD_READ_1_2_2_DTR
},
2200 { SNOR_HWCAPS_READ_1_1_4
, SNOR_CMD_READ_1_1_4
},
2201 { SNOR_HWCAPS_READ_1_4_4
, SNOR_CMD_READ_1_4_4
},
2202 { SNOR_HWCAPS_READ_4_4_4
, SNOR_CMD_READ_4_4_4
},
2203 { SNOR_HWCAPS_READ_1_4_4_DTR
, SNOR_CMD_READ_1_4_4_DTR
},
2204 { SNOR_HWCAPS_READ_1_1_8
, SNOR_CMD_READ_1_1_8
},
2205 { SNOR_HWCAPS_READ_1_8_8
, SNOR_CMD_READ_1_8_8
},
2206 { SNOR_HWCAPS_READ_8_8_8
, SNOR_CMD_READ_8_8_8
},
2207 { SNOR_HWCAPS_READ_1_8_8_DTR
, SNOR_CMD_READ_1_8_8_DTR
},
2208 { SNOR_HWCAPS_READ_8_8_8_DTR
, SNOR_CMD_READ_8_8_8_DTR
},
2211 return spi_nor_hwcaps2cmd(hwcaps
, hwcaps_read2cmd
,
2212 ARRAY_SIZE(hwcaps_read2cmd
));
2215 int spi_nor_hwcaps_pp2cmd(u32 hwcaps
)
2217 static const int hwcaps_pp2cmd
[][2] = {
2218 { SNOR_HWCAPS_PP
, SNOR_CMD_PP
},
2219 { SNOR_HWCAPS_PP_1_1_4
, SNOR_CMD_PP_1_1_4
},
2220 { SNOR_HWCAPS_PP_1_4_4
, SNOR_CMD_PP_1_4_4
},
2221 { SNOR_HWCAPS_PP_4_4_4
, SNOR_CMD_PP_4_4_4
},
2222 { SNOR_HWCAPS_PP_1_1_8
, SNOR_CMD_PP_1_1_8
},
2223 { SNOR_HWCAPS_PP_1_8_8
, SNOR_CMD_PP_1_8_8
},
2224 { SNOR_HWCAPS_PP_8_8_8
, SNOR_CMD_PP_8_8_8
},
2225 { SNOR_HWCAPS_PP_8_8_8_DTR
, SNOR_CMD_PP_8_8_8_DTR
},
2228 return spi_nor_hwcaps2cmd(hwcaps
, hwcaps_pp2cmd
,
2229 ARRAY_SIZE(hwcaps_pp2cmd
));
2233 * spi_nor_spimem_check_op - check if the operation is supported
2235 *@nor: pointer to a 'struct spi_nor'
2236 *@op: pointer to op template to be checked
2238 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2240 static int spi_nor_spimem_check_op(struct spi_nor
*nor
,
2241 struct spi_mem_op
*op
)
2244 * First test with 4 address bytes. The opcode itself might
2245 * be a 3B addressing opcode but we don't care, because
2246 * SPI controller implementation should not check the opcode,
2247 * but just the sequence.
2249 op
->addr
.nbytes
= 4;
2250 if (!spi_mem_supports_op(nor
->spimem
, op
)) {
2251 if (nor
->params
->size
> SZ_16M
)
2254 /* If flash size <= 16MB, 3 address bytes are sufficient */
2255 op
->addr
.nbytes
= 3;
2256 if (!spi_mem_supports_op(nor
->spimem
, op
))
2264 * spi_nor_spimem_check_readop - check if the read op is supported
2266 *@nor: pointer to a 'struct spi_nor'
2267 *@read: pointer to op template to be checked
2269 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2271 static int spi_nor_spimem_check_readop(struct spi_nor
*nor
,
2272 const struct spi_nor_read_command
*read
)
2274 struct spi_mem_op op
= SPI_NOR_READ_OP(read
->opcode
);
2276 spi_nor_spimem_setup_op(nor
, &op
, read
->proto
);
2278 /* convert the dummy cycles to the number of bytes */
2279 op
.dummy
.nbytes
= (read
->num_mode_clocks
+ read
->num_wait_states
) *
2280 op
.dummy
.buswidth
/ 8;
2281 if (spi_nor_protocol_is_dtr(nor
->read_proto
))
2282 op
.dummy
.nbytes
*= 2;
2284 return spi_nor_spimem_check_op(nor
, &op
);
2288 * spi_nor_spimem_check_pp - check if the page program op is supported
2290 *@nor: pointer to a 'struct spi_nor'
2291 *@pp: pointer to op template to be checked
2293 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2295 static int spi_nor_spimem_check_pp(struct spi_nor
*nor
,
2296 const struct spi_nor_pp_command
*pp
)
2298 struct spi_mem_op op
= SPI_NOR_PP_OP(pp
->opcode
);
2300 spi_nor_spimem_setup_op(nor
, &op
, pp
->proto
);
2302 return spi_nor_spimem_check_op(nor
, &op
);
2306 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
2307 * based on SPI controller capabilities
2308 * @nor: pointer to a 'struct spi_nor'
2309 * @hwcaps: pointer to resulting capabilities after adjusting
2310 * according to controller and flash's capability
2313 spi_nor_spimem_adjust_hwcaps(struct spi_nor
*nor
, u32
*hwcaps
)
2315 struct spi_nor_flash_parameter
*params
= nor
->params
;
2318 /* X-X-X modes are not supported yet, mask them all. */
2319 *hwcaps
&= ~SNOR_HWCAPS_X_X_X
;
2322 * If the reset line is broken, we do not want to enter a stateful
2325 if (nor
->flags
& SNOR_F_BROKEN_RESET
)
2326 *hwcaps
&= ~(SNOR_HWCAPS_X_X_X
| SNOR_HWCAPS_X_X_X_DTR
);
2328 for (cap
= 0; cap
< sizeof(*hwcaps
) * BITS_PER_BYTE
; cap
++) {
2331 if (!(*hwcaps
& BIT(cap
)))
2334 rdidx
= spi_nor_hwcaps_read2cmd(BIT(cap
));
2336 spi_nor_spimem_check_readop(nor
, ¶ms
->reads
[rdidx
]))
2337 *hwcaps
&= ~BIT(cap
);
2339 ppidx
= spi_nor_hwcaps_pp2cmd(BIT(cap
));
2343 if (spi_nor_spimem_check_pp(nor
,
2344 ¶ms
->page_programs
[ppidx
]))
2345 *hwcaps
&= ~BIT(cap
);
2350 * spi_nor_set_erase_type() - set a SPI NOR erase type
2351 * @erase: pointer to a structure that describes a SPI NOR erase type
2352 * @size: the size of the sector/block erased by the erase type
2353 * @opcode: the SPI command op code to erase the sector/block
2355 void spi_nor_set_erase_type(struct spi_nor_erase_type
*erase
, u32 size
,
2359 erase
->opcode
= opcode
;
2360 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
2361 erase
->size_shift
= ffs(erase
->size
) - 1;
2362 erase
->size_mask
= (1 << erase
->size_shift
) - 1;
2366 * spi_nor_mask_erase_type() - mask out a SPI NOR erase type
2367 * @erase: pointer to a structure that describes a SPI NOR erase type
2369 void spi_nor_mask_erase_type(struct spi_nor_erase_type
*erase
)
2375 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2376 * @map: the erase map of the SPI NOR
2377 * @erase_mask: bitmask encoding erase types that can erase the entire
2379 * @flash_size: the spi nor flash memory size
2381 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map
*map
,
2382 u8 erase_mask
, u64 flash_size
)
2384 map
->uniform_region
.offset
= 0;
2385 map
->uniform_region
.size
= flash_size
;
2386 map
->uniform_region
.erase_mask
= erase_mask
;
2387 map
->regions
= &map
->uniform_region
;
2391 int spi_nor_post_bfpt_fixups(struct spi_nor
*nor
,
2392 const struct sfdp_parameter_header
*bfpt_header
,
2393 const struct sfdp_bfpt
*bfpt
)
2397 if (nor
->manufacturer
&& nor
->manufacturer
->fixups
&&
2398 nor
->manufacturer
->fixups
->post_bfpt
) {
2399 ret
= nor
->manufacturer
->fixups
->post_bfpt(nor
, bfpt_header
,
2405 if (nor
->info
->fixups
&& nor
->info
->fixups
->post_bfpt
)
2406 return nor
->info
->fixups
->post_bfpt(nor
, bfpt_header
, bfpt
);
2411 static int spi_nor_select_read(struct spi_nor
*nor
,
2414 int cmd
, best_match
= fls(shared_hwcaps
& SNOR_HWCAPS_READ_MASK
) - 1;
2415 const struct spi_nor_read_command
*read
;
2420 cmd
= spi_nor_hwcaps_read2cmd(BIT(best_match
));
2424 read
= &nor
->params
->reads
[cmd
];
2425 nor
->read_opcode
= read
->opcode
;
2426 nor
->read_proto
= read
->proto
;
2429 * In the SPI NOR framework, we don't need to make the difference
2430 * between mode clock cycles and wait state clock cycles.
2431 * Indeed, the value of the mode clock cycles is used by a QSPI
2432 * flash memory to know whether it should enter or leave its 0-4-4
2433 * (Continuous Read / XIP) mode.
2434 * eXecution In Place is out of the scope of the mtd sub-system.
2435 * Hence we choose to merge both mode and wait state clock cycles
2436 * into the so called dummy clock cycles.
2438 nor
->read_dummy
= read
->num_mode_clocks
+ read
->num_wait_states
;
2442 static int spi_nor_select_pp(struct spi_nor
*nor
,
2445 int cmd
, best_match
= fls(shared_hwcaps
& SNOR_HWCAPS_PP_MASK
) - 1;
2446 const struct spi_nor_pp_command
*pp
;
2451 cmd
= spi_nor_hwcaps_pp2cmd(BIT(best_match
));
2455 pp
= &nor
->params
->page_programs
[cmd
];
2456 nor
->program_opcode
= pp
->opcode
;
2457 nor
->write_proto
= pp
->proto
;
2462 * spi_nor_select_uniform_erase() - select optimum uniform erase type
2463 * @map: the erase map of the SPI NOR
2465 * Once the optimum uniform sector erase command is found, disable all the
2468 * Return: pointer to erase type on success, NULL otherwise.
2470 static const struct spi_nor_erase_type
*
2471 spi_nor_select_uniform_erase(struct spi_nor_erase_map
*map
)
2473 const struct spi_nor_erase_type
*tested_erase
, *erase
= NULL
;
2475 u8 uniform_erase_type
= map
->uniform_region
.erase_mask
;
2478 * Search for the biggest erase size, except for when compiled
2481 for (i
= SNOR_ERASE_TYPE_MAX
- 1; i
>= 0; i
--) {
2482 if (!(uniform_erase_type
& BIT(i
)))
2485 tested_erase
= &map
->erase_type
[i
];
2487 /* Skip masked erase types. */
2488 if (!tested_erase
->size
)
2492 * If the current erase size is the 4k one, stop here,
2493 * we have found the right uniform Sector Erase command.
2495 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
) &&
2496 tested_erase
->size
== SZ_4K
) {
2497 erase
= tested_erase
;
2502 * Otherwise, the current erase size is still a valid candidate.
2503 * Select the biggest valid candidate.
2505 if (!erase
&& tested_erase
->size
)
2506 erase
= tested_erase
;
2507 /* keep iterating to find the wanted_size */
2513 /* Disable all other Sector Erase commands. */
2514 map
->uniform_region
.erase_mask
= BIT(erase
- map
->erase_type
);
2518 static int spi_nor_select_erase(struct spi_nor
*nor
)
2520 struct spi_nor_erase_map
*map
= &nor
->params
->erase_map
;
2521 const struct spi_nor_erase_type
*erase
= NULL
;
2522 struct mtd_info
*mtd
= &nor
->mtd
;
2526 * The previous implementation handling Sector Erase commands assumed
2527 * that the SPI flash memory has an uniform layout then used only one
2528 * of the supported erase sizes for all Sector Erase commands.
2529 * So to be backward compatible, the new implementation also tries to
2530 * manage the SPI flash memory as uniform with a single erase sector
2531 * size, when possible.
2533 if (spi_nor_has_uniform_erase(nor
)) {
2534 erase
= spi_nor_select_uniform_erase(map
);
2537 nor
->erase_opcode
= erase
->opcode
;
2538 mtd
->erasesize
= erase
->size
;
2543 * For non-uniform SPI flash memory, set mtd->erasesize to the
2544 * maximum erase sector size. No need to set nor->erase_opcode.
2546 for (i
= SNOR_ERASE_TYPE_MAX
- 1; i
>= 0; i
--) {
2547 if (map
->erase_type
[i
].size
) {
2548 erase
= &map
->erase_type
[i
];
2556 mtd
->erasesize
= erase
->size
;
2560 static int spi_nor_set_addr_nbytes(struct spi_nor
*nor
)
2562 if (nor
->params
->addr_nbytes
) {
2563 nor
->addr_nbytes
= nor
->params
->addr_nbytes
;
2564 } else if (nor
->read_proto
== SNOR_PROTO_8_8_8_DTR
) {
2566 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
2567 * in this protocol an odd addr_nbytes cannot be used because
2568 * then the address phase would only span a cycle and a half.
2569 * Half a cycle would be left over. We would then have to start
2570 * the dummy phase in the middle of a cycle and so too the data
2571 * phase, and we will end the transaction with half a cycle left
2574 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to
2575 * avoid this situation.
2577 nor
->addr_nbytes
= 4;
2578 } else if (nor
->info
->addr_nbytes
) {
2579 nor
->addr_nbytes
= nor
->info
->addr_nbytes
;
2581 nor
->addr_nbytes
= 3;
2584 if (nor
->addr_nbytes
== 3 && nor
->params
->size
> 0x1000000) {
2585 /* enable 4-byte addressing if the device exceeds 16MiB */
2586 nor
->addr_nbytes
= 4;
2589 if (nor
->addr_nbytes
> SPI_NOR_MAX_ADDR_NBYTES
) {
2590 dev_dbg(nor
->dev
, "The number of address bytes is too large: %u\n",
2595 /* Set 4byte opcodes when possible. */
2596 if (nor
->addr_nbytes
== 4 && nor
->flags
& SNOR_F_4B_OPCODES
&&
2597 !(nor
->flags
& SNOR_F_HAS_4BAIT
))
2598 spi_nor_set_4byte_opcodes(nor
);
2603 static int spi_nor_setup(struct spi_nor
*nor
,
2604 const struct spi_nor_hwcaps
*hwcaps
)
2606 struct spi_nor_flash_parameter
*params
= nor
->params
;
2607 u32 ignored_mask
, shared_mask
;
2611 * Keep only the hardware capabilities supported by both the SPI
2612 * controller and the SPI flash memory.
2614 shared_mask
= hwcaps
->mask
& params
->hwcaps
.mask
;
2618 * When called from spi_nor_probe(), all caps are set and we
2619 * need to discard some of them based on what the SPI
2620 * controller actually supports (using spi_mem_supports_op()).
2622 spi_nor_spimem_adjust_hwcaps(nor
, &shared_mask
);
2625 * SPI n-n-n protocols are not supported when the SPI
2626 * controller directly implements the spi_nor interface.
2627 * Yet another reason to switch to spi-mem.
2629 ignored_mask
= SNOR_HWCAPS_X_X_X
| SNOR_HWCAPS_X_X_X_DTR
;
2630 if (shared_mask
& ignored_mask
) {
2632 "SPI n-n-n protocols are not supported.\n");
2633 shared_mask
&= ~ignored_mask
;
2637 /* Select the (Fast) Read command. */
2638 err
= spi_nor_select_read(nor
, shared_mask
);
2641 "can't select read settings supported by both the SPI controller and memory.\n");
2645 /* Select the Page Program command. */
2646 err
= spi_nor_select_pp(nor
, shared_mask
);
2649 "can't select write settings supported by both the SPI controller and memory.\n");
2653 /* Select the Sector Erase command. */
2654 err
= spi_nor_select_erase(nor
);
2657 "can't select erase settings supported by both the SPI controller and memory.\n");
2661 return spi_nor_set_addr_nbytes(nor
);
2665 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
2666 * settings based on MFR register and ->default_init() hook.
2667 * @nor: pointer to a 'struct spi_nor'.
2669 static void spi_nor_manufacturer_init_params(struct spi_nor
*nor
)
2671 if (nor
->manufacturer
&& nor
->manufacturer
->fixups
&&
2672 nor
->manufacturer
->fixups
->default_init
)
2673 nor
->manufacturer
->fixups
->default_init(nor
);
2675 if (nor
->info
->fixups
&& nor
->info
->fixups
->default_init
)
2676 nor
->info
->fixups
->default_init(nor
);
2680 * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and
2681 * settings based on nor->info->sfdp_flags. This method should be called only by
2682 * flashes that do not define SFDP tables. If the flash supports SFDP but the
2683 * information is wrong and the settings from this function can not be retrieved
2684 * by parsing SFDP, one should instead use the fixup hooks and update the wrong
2686 * @nor: pointer to a 'struct spi_nor'.
2688 static void spi_nor_no_sfdp_init_params(struct spi_nor
*nor
)
2690 struct spi_nor_flash_parameter
*params
= nor
->params
;
2691 struct spi_nor_erase_map
*map
= ¶ms
->erase_map
;
2692 const struct flash_info
*info
= nor
->info
;
2693 const u8 no_sfdp_flags
= info
->no_sfdp_flags
;
2696 if (no_sfdp_flags
& SPI_NOR_DUAL_READ
) {
2697 params
->hwcaps
.mask
|= SNOR_HWCAPS_READ_1_1_2
;
2698 spi_nor_set_read_settings(¶ms
->reads
[SNOR_CMD_READ_1_1_2
],
2699 0, 8, SPINOR_OP_READ_1_1_2
,
2703 if (no_sfdp_flags
& SPI_NOR_QUAD_READ
) {
2704 params
->hwcaps
.mask
|= SNOR_HWCAPS_READ_1_1_4
;
2705 spi_nor_set_read_settings(¶ms
->reads
[SNOR_CMD_READ_1_1_4
],
2706 0, 8, SPINOR_OP_READ_1_1_4
,
2710 if (no_sfdp_flags
& SPI_NOR_OCTAL_READ
) {
2711 params
->hwcaps
.mask
|= SNOR_HWCAPS_READ_1_1_8
;
2712 spi_nor_set_read_settings(¶ms
->reads
[SNOR_CMD_READ_1_1_8
],
2713 0, 8, SPINOR_OP_READ_1_1_8
,
2717 if (no_sfdp_flags
& SPI_NOR_OCTAL_DTR_READ
) {
2718 params
->hwcaps
.mask
|= SNOR_HWCAPS_READ_8_8_8_DTR
;
2719 spi_nor_set_read_settings(¶ms
->reads
[SNOR_CMD_READ_8_8_8_DTR
],
2720 0, 20, SPINOR_OP_READ_FAST
,
2721 SNOR_PROTO_8_8_8_DTR
);
2724 if (no_sfdp_flags
& SPI_NOR_OCTAL_DTR_PP
) {
2725 params
->hwcaps
.mask
|= SNOR_HWCAPS_PP_8_8_8_DTR
;
2727 * Since xSPI Page Program opcode is backward compatible with
2728 * Legacy SPI, use Legacy SPI opcode there as well.
2730 spi_nor_set_pp_settings(¶ms
->page_programs
[SNOR_CMD_PP_8_8_8_DTR
],
2731 SPINOR_OP_PP
, SNOR_PROTO_8_8_8_DTR
);
2735 * Sector Erase settings. Sort Erase Types in ascending order, with the
2736 * smallest erase size starting at BIT(0).
2740 if (no_sfdp_flags
& SECT_4K
) {
2741 erase_mask
|= BIT(i
);
2742 spi_nor_set_erase_type(&map
->erase_type
[i
], 4096u,
2746 erase_mask
|= BIT(i
);
2747 spi_nor_set_erase_type(&map
->erase_type
[i
],
2748 info
->sector_size
?: SPI_NOR_DEFAULT_SECTOR_SIZE
,
2750 spi_nor_init_uniform_erase_map(map
, erase_mask
, params
->size
);
2754 * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined
2755 * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP.
2756 * @nor: pointer to a 'struct spi_nor'
2758 static void spi_nor_init_flags(struct spi_nor
*nor
)
2760 struct device_node
*np
= spi_nor_get_flash_node(nor
);
2761 const u16 flags
= nor
->info
->flags
;
2763 if (of_property_read_bool(np
, "broken-flash-reset"))
2764 nor
->flags
|= SNOR_F_BROKEN_RESET
;
2766 if (of_property_read_bool(np
, "no-wp"))
2767 nor
->flags
|= SNOR_F_NO_WP
;
2769 if (flags
& SPI_NOR_SWP_IS_VOLATILE
)
2770 nor
->flags
|= SNOR_F_SWP_IS_VOLATILE
;
2772 if (flags
& SPI_NOR_HAS_LOCK
)
2773 nor
->flags
|= SNOR_F_HAS_LOCK
;
2775 if (flags
& SPI_NOR_HAS_TB
) {
2776 nor
->flags
|= SNOR_F_HAS_SR_TB
;
2777 if (flags
& SPI_NOR_TB_SR_BIT6
)
2778 nor
->flags
|= SNOR_F_HAS_SR_TB_BIT6
;
2781 if (flags
& SPI_NOR_4BIT_BP
) {
2782 nor
->flags
|= SNOR_F_HAS_4BIT_BP
;
2783 if (flags
& SPI_NOR_BP3_SR_BIT6
)
2784 nor
->flags
|= SNOR_F_HAS_SR_BP3_BIT6
;
2787 if (flags
& SPI_NOR_RWW
&& nor
->params
->n_banks
> 1 &&
2788 !nor
->controller_ops
)
2789 nor
->flags
|= SNOR_F_RWW
;
2793 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not
2794 * be discovered by SFDP for this particular flash because the SFDP table that
2795 * indicates this support is not defined in the flash. In case the table for
2796 * this support is defined but has wrong values, one should instead use a
2797 * post_sfdp() hook to set the SNOR_F equivalent flag.
2798 * @nor: pointer to a 'struct spi_nor'
2800 static void spi_nor_init_fixup_flags(struct spi_nor
*nor
)
2802 const u8 fixup_flags
= nor
->info
->fixup_flags
;
2804 if (fixup_flags
& SPI_NOR_4B_OPCODES
)
2805 nor
->flags
|= SNOR_F_4B_OPCODES
;
2807 if (fixup_flags
& SPI_NOR_IO_MODE_EN_VOLATILE
)
2808 nor
->flags
|= SNOR_F_IO_MODE_EN_VOLATILE
;
2812 * spi_nor_late_init_params() - Late initialization of default flash parameters.
2813 * @nor: pointer to a 'struct spi_nor'
2815 * Used to initialize flash parameters that are not declared in the JESD216
2816 * SFDP standard, or where SFDP tables are not defined at all.
2817 * Will replace the spi_nor_manufacturer_init_params() method.
2819 static int spi_nor_late_init_params(struct spi_nor
*nor
)
2821 struct spi_nor_flash_parameter
*params
= nor
->params
;
2824 if (nor
->manufacturer
&& nor
->manufacturer
->fixups
&&
2825 nor
->manufacturer
->fixups
->late_init
) {
2826 ret
= nor
->manufacturer
->fixups
->late_init(nor
);
2831 /* Needed by some flashes late_init hooks. */
2832 spi_nor_init_flags(nor
);
2834 if (nor
->info
->fixups
&& nor
->info
->fixups
->late_init
) {
2835 ret
= nor
->info
->fixups
->late_init(nor
);
2840 if (!nor
->params
->die_erase_opcode
)
2841 nor
->params
->die_erase_opcode
= SPINOR_OP_CHIP_ERASE
;
2843 /* Default method kept for backward compatibility. */
2844 if (!params
->set_4byte_addr_mode
)
2845 params
->set_4byte_addr_mode
= spi_nor_set_4byte_addr_mode_brwr
;
2847 spi_nor_init_fixup_flags(nor
);
2850 * NOR protection support. When locking_ops are not provided, we pick
2853 if (nor
->flags
& SNOR_F_HAS_LOCK
&& !nor
->params
->locking_ops
)
2854 spi_nor_init_default_locking_ops(nor
);
2856 if (params
->n_banks
> 1)
2857 params
->bank_size
= div_u64(params
->size
, params
->n_banks
);
2863 * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash
2864 * parameters and settings based on JESD216 SFDP standard.
2865 * @nor: pointer to a 'struct spi_nor'.
2867 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
2868 * legacy flash parameters and settings will be restored.
2870 static void spi_nor_sfdp_init_params_deprecated(struct spi_nor
*nor
)
2872 struct spi_nor_flash_parameter sfdp_params
;
2874 memcpy(&sfdp_params
, nor
->params
, sizeof(sfdp_params
));
2876 if (spi_nor_parse_sfdp(nor
)) {
2877 memcpy(nor
->params
, &sfdp_params
, sizeof(*nor
->params
));
2878 nor
->flags
&= ~SNOR_F_4B_OPCODES
;
2883 * spi_nor_init_params_deprecated() - Deprecated way of initializing flash
2884 * parameters and settings.
2885 * @nor: pointer to a 'struct spi_nor'.
2887 * The method assumes that flash doesn't support SFDP so it initializes flash
2888 * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten
2889 * when parsing SFDP, if supported.
2891 static void spi_nor_init_params_deprecated(struct spi_nor
*nor
)
2893 spi_nor_no_sfdp_init_params(nor
);
2895 spi_nor_manufacturer_init_params(nor
);
2897 if (nor
->info
->no_sfdp_flags
& (SPI_NOR_DUAL_READ
|
2899 SPI_NOR_OCTAL_READ
|
2900 SPI_NOR_OCTAL_DTR_READ
))
2901 spi_nor_sfdp_init_params_deprecated(nor
);
2905 * spi_nor_init_default_params() - Default initialization of flash parameters
2906 * and settings. Done for all flashes, regardless is they define SFDP tables
2908 * @nor: pointer to a 'struct spi_nor'.
2910 static void spi_nor_init_default_params(struct spi_nor
*nor
)
2912 struct spi_nor_flash_parameter
*params
= nor
->params
;
2913 const struct flash_info
*info
= nor
->info
;
2914 struct device_node
*np
= spi_nor_get_flash_node(nor
);
2916 params
->quad_enable
= spi_nor_sr2_bit1_quad_enable
;
2917 params
->otp
.org
= info
->otp
;
2919 /* Default to 16-bit Write Status (01h) Command */
2920 nor
->flags
|= SNOR_F_HAS_16BIT_SR
;
2922 /* Set SPI NOR sizes. */
2923 params
->writesize
= 1;
2924 params
->size
= info
->size
;
2925 params
->bank_size
= params
->size
;
2926 params
->page_size
= info
->page_size
?: SPI_NOR_DEFAULT_PAGE_SIZE
;
2927 params
->n_banks
= info
->n_banks
?: SPI_NOR_DEFAULT_N_BANKS
;
2929 /* Default to Fast Read for non-DT and enable it if requested by DT. */
2930 if (!np
|| of_property_read_bool(np
, "m25p,fast-read"))
2931 params
->hwcaps
.mask
|= SNOR_HWCAPS_READ_FAST
;
2933 /* (Fast) Read settings. */
2934 params
->hwcaps
.mask
|= SNOR_HWCAPS_READ
;
2935 spi_nor_set_read_settings(¶ms
->reads
[SNOR_CMD_READ
],
2936 0, 0, SPINOR_OP_READ
,
2939 if (params
->hwcaps
.mask
& SNOR_HWCAPS_READ_FAST
)
2940 spi_nor_set_read_settings(¶ms
->reads
[SNOR_CMD_READ_FAST
],
2941 0, 8, SPINOR_OP_READ_FAST
,
2943 /* Page Program settings. */
2944 params
->hwcaps
.mask
|= SNOR_HWCAPS_PP
;
2945 spi_nor_set_pp_settings(¶ms
->page_programs
[SNOR_CMD_PP
],
2946 SPINOR_OP_PP
, SNOR_PROTO_1_1_1
);
2948 if (info
->flags
& SPI_NOR_QUAD_PP
) {
2949 params
->hwcaps
.mask
|= SNOR_HWCAPS_PP_1_1_4
;
2950 spi_nor_set_pp_settings(¶ms
->page_programs
[SNOR_CMD_PP_1_1_4
],
2951 SPINOR_OP_PP_1_1_4
, SNOR_PROTO_1_1_4
);
2956 * spi_nor_init_params() - Initialize the flash's parameters and settings.
2957 * @nor: pointer to a 'struct spi_nor'.
2959 * The flash parameters and settings are initialized based on a sequence of
2960 * calls that are ordered by priority:
2962 * 1/ Default flash parameters initialization. The initializations are done
2963 * based on nor->info data:
2964 * spi_nor_info_init_params()
2966 * which can be overwritten by:
2967 * 2/ Manufacturer flash parameters initialization. The initializations are
2968 * done based on MFR register, or when the decisions can not be done solely
2969 * based on MFR, by using specific flash_info tweeks, ->default_init():
2970 * spi_nor_manufacturer_init_params()
2972 * which can be overwritten by:
2973 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
2974 * should be more accurate that the above.
2975 * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params()
2977 * Please note that there is a ->post_bfpt() fixup hook that can overwrite
2978 * the flash parameters and settings immediately after parsing the Basic
2979 * Flash Parameter Table.
2980 * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed.
2981 * It is used to tweak various flash parameters when information provided
2982 * by the SFDP tables are wrong.
2984 * which can be overwritten by:
2985 * 4/ Late flash parameters initialization, used to initialize flash
2986 * parameters that are not declared in the JESD216 SFDP standard, or where SFDP
2987 * tables are not defined at all.
2988 * spi_nor_late_init_params()
2990 * Return: 0 on success, -errno otherwise.
2992 static int spi_nor_init_params(struct spi_nor
*nor
)
2996 nor
->params
= devm_kzalloc(nor
->dev
, sizeof(*nor
->params
), GFP_KERNEL
);
3000 spi_nor_init_default_params(nor
);
3002 if (spi_nor_needs_sfdp(nor
)) {
3003 ret
= spi_nor_parse_sfdp(nor
);
3005 dev_err(nor
->dev
, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
3008 } else if (nor
->info
->no_sfdp_flags
& SPI_NOR_SKIP_SFDP
) {
3009 spi_nor_no_sfdp_init_params(nor
);
3011 spi_nor_init_params_deprecated(nor
);
3014 ret
= spi_nor_late_init_params(nor
);
3018 if (WARN_ON(!is_power_of_2(nor
->params
->page_size
)))
3024 /** spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
3025 * @nor: pointer to a 'struct spi_nor'
3026 * @enable: whether to enable or disable Octal DTR
3028 * Return: 0 on success, -errno otherwise.
3030 static int spi_nor_set_octal_dtr(struct spi_nor
*nor
, bool enable
)
3034 if (!nor
->params
->set_octal_dtr
)
3037 if (!(nor
->read_proto
== SNOR_PROTO_8_8_8_DTR
&&
3038 nor
->write_proto
== SNOR_PROTO_8_8_8_DTR
))
3041 if (!(nor
->flags
& SNOR_F_IO_MODE_EN_VOLATILE
))
3044 ret
= nor
->params
->set_octal_dtr(nor
, enable
);
3049 nor
->reg_proto
= SNOR_PROTO_8_8_8_DTR
;
3051 nor
->reg_proto
= SNOR_PROTO_1_1_1
;
3057 * spi_nor_quad_enable() - enable Quad I/O if needed.
3058 * @nor: pointer to a 'struct spi_nor'
3060 * Return: 0 on success, -errno otherwise.
3062 static int spi_nor_quad_enable(struct spi_nor
*nor
)
3064 if (!nor
->params
->quad_enable
)
3067 if (!(spi_nor_get_protocol_width(nor
->read_proto
) == 4 ||
3068 spi_nor_get_protocol_width(nor
->write_proto
) == 4))
3071 return nor
->params
->quad_enable(nor
);
3075 * spi_nor_set_4byte_addr_mode() - Set address mode.
3076 * @nor: pointer to a 'struct spi_nor'.
3077 * @enable: enable/disable 4 byte address mode.
3079 * Return: 0 on success, -errno otherwise.
3081 int spi_nor_set_4byte_addr_mode(struct spi_nor
*nor
, bool enable
)
3083 struct spi_nor_flash_parameter
*params
= nor
->params
;
3088 * If the RESET# pin isn't hooked up properly, or the system
3089 * otherwise doesn't perform a reset command in the boot
3090 * sequence, it's impossible to 100% protect against unexpected
3091 * reboots (e.g., crashes). Warn the user (or hopefully, system
3092 * designer) that this is bad.
3094 WARN_ONCE(nor
->flags
& SNOR_F_BROKEN_RESET
,
3095 "enabling reset hack; may not recover from unexpected reboots\n");
3098 ret
= params
->set_4byte_addr_mode(nor
, enable
);
3099 if (ret
&& ret
!= -EOPNOTSUPP
)
3103 params
->addr_nbytes
= 4;
3104 params
->addr_mode_nbytes
= 4;
3106 params
->addr_nbytes
= 3;
3107 params
->addr_mode_nbytes
= 3;
3113 static int spi_nor_init(struct spi_nor
*nor
)
3117 err
= spi_nor_set_octal_dtr(nor
, true);
3119 dev_dbg(nor
->dev
, "octal mode not supported\n");
3123 err
= spi_nor_quad_enable(nor
);
3125 dev_dbg(nor
->dev
, "quad mode not supported\n");
3130 * Some SPI NOR flashes are write protected by default after a power-on
3131 * reset cycle, in order to avoid inadvertent writes during power-up.
3132 * Backward compatibility imposes to unlock the entire flash memory
3133 * array at power-up by default. Depending on the kernel configuration
3134 * (1) do nothing, (2) always unlock the entire flash array or (3)
3135 * unlock the entire flash array only when the software write
3136 * protection bits are volatile. The latter is indicated by
3137 * SNOR_F_SWP_IS_VOLATILE.
3139 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE
) ||
3140 (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE
) &&
3141 nor
->flags
& SNOR_F_SWP_IS_VOLATILE
))
3142 spi_nor_try_unlock_all(nor
);
3144 if (nor
->addr_nbytes
== 4 &&
3145 nor
->read_proto
!= SNOR_PROTO_8_8_8_DTR
&&
3146 !(nor
->flags
& SNOR_F_4B_OPCODES
))
3147 return spi_nor_set_4byte_addr_mode(nor
, true);
3153 * spi_nor_soft_reset() - Perform a software reset
3154 * @nor: pointer to 'struct spi_nor'
3156 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
3157 * the device to its power-on-reset state. This is useful when the software has
3158 * made some changes to device (volatile) registers and needs to reset it before
3159 * shutting down, for example.
3161 * Not every flash supports this sequence. The same set of opcodes might be used
3162 * for some other operation on a flash that does not support this. Support for
3163 * this sequence can be discovered via SFDP in the BFPT table.
3165 * Return: 0 on success, -errno otherwise.
3167 static void spi_nor_soft_reset(struct spi_nor
*nor
)
3169 struct spi_mem_op op
;
3172 op
= (struct spi_mem_op
)SPINOR_SRSTEN_OP
;
3174 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
3176 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
3178 if (ret
!= -EOPNOTSUPP
)
3179 dev_warn(nor
->dev
, "Software reset failed: %d\n", ret
);
3183 op
= (struct spi_mem_op
)SPINOR_SRST_OP
;
3185 spi_nor_spimem_setup_op(nor
, &op
, nor
->reg_proto
);
3187 ret
= spi_mem_exec_op(nor
->spimem
, &op
);
3189 dev_warn(nor
->dev
, "Software reset failed: %d\n", ret
);
3194 * Software Reset is not instant, and the delay varies from flash to
3195 * flash. Looking at a few flashes, most range somewhere below 100
3196 * microseconds. So, sleep for a range of 200-400 us.
3198 usleep_range(SPI_NOR_SRST_SLEEP_MIN
, SPI_NOR_SRST_SLEEP_MAX
);
3201 /* mtd suspend handler */
3202 static int spi_nor_suspend(struct mtd_info
*mtd
)
3204 struct spi_nor
*nor
= mtd_to_spi_nor(mtd
);
3207 /* Disable octal DTR mode if we enabled it. */
3208 ret
= spi_nor_set_octal_dtr(nor
, false);
3210 dev_err(nor
->dev
, "suspend() failed\n");
3215 /* mtd resume handler */
3216 static void spi_nor_resume(struct mtd_info
*mtd
)
3218 struct spi_nor
*nor
= mtd_to_spi_nor(mtd
);
3219 struct device
*dev
= nor
->dev
;
3222 /* re-initialize the nor chip */
3223 ret
= spi_nor_init(nor
);
3225 dev_err(dev
, "resume() failed\n");
3228 static int spi_nor_get_device(struct mtd_info
*mtd
)
3230 struct mtd_info
*master
= mtd_get_master(mtd
);
3231 struct spi_nor
*nor
= mtd_to_spi_nor(master
);
3235 dev
= nor
->spimem
->spi
->controller
->dev
.parent
;
3239 if (!try_module_get(dev
->driver
->owner
))
3245 static void spi_nor_put_device(struct mtd_info
*mtd
)
3247 struct mtd_info
*master
= mtd_get_master(mtd
);
3248 struct spi_nor
*nor
= mtd_to_spi_nor(master
);
3252 dev
= nor
->spimem
->spi
->controller
->dev
.parent
;
3256 module_put(dev
->driver
->owner
);
3259 static void spi_nor_restore(struct spi_nor
*nor
)
3263 /* restore the addressing mode */
3264 if (nor
->addr_nbytes
== 4 && !(nor
->flags
& SNOR_F_4B_OPCODES
) &&
3265 nor
->flags
& SNOR_F_BROKEN_RESET
) {
3266 ret
= spi_nor_set_4byte_addr_mode(nor
, false);
3269 * Do not stop the execution in the hope that the flash
3270 * will default to the 3-byte address mode after the
3273 dev_err(nor
->dev
, "Failed to exit 4-byte address mode, err = %d\n", ret
);
3276 if (nor
->flags
& SNOR_F_SOFT_RESET
)
3277 spi_nor_soft_reset(nor
);
3280 static const struct flash_info
*spi_nor_match_name(struct spi_nor
*nor
,
3285 for (i
= 0; i
< ARRAY_SIZE(manufacturers
); i
++) {
3286 for (j
= 0; j
< manufacturers
[i
]->nparts
; j
++) {
3287 if (manufacturers
[i
]->parts
[j
].name
&&
3288 !strcmp(name
, manufacturers
[i
]->parts
[j
].name
)) {
3289 nor
->manufacturer
= manufacturers
[i
];
3290 return &manufacturers
[i
]->parts
[j
];
3298 static const struct flash_info
*spi_nor_get_flash_info(struct spi_nor
*nor
,
3301 const struct flash_info
*info
= NULL
;
3304 info
= spi_nor_match_name(nor
, name
);
3306 * Auto-detect if chip name wasn't specified or not found, or the chip
3307 * has an ID. If the chip supposedly has an ID, we also do an
3308 * auto-detection to compare it later.
3310 if (!info
|| info
->id
) {
3311 const struct flash_info
*jinfo
;
3313 jinfo
= spi_nor_detect(nor
);
3318 * If caller has specified name of flash model that can normally
3319 * be detected using JEDEC, let's verify it.
3321 if (info
&& jinfo
!= info
)
3322 dev_warn(nor
->dev
, "found %s, expected %s\n",
3323 jinfo
->name
, info
->name
);
3325 /* If info was set before, JEDEC knows better. */
3333 spi_nor_get_region_erasesize(const struct spi_nor_erase_region
*region
,
3334 const struct spi_nor_erase_type
*erase_type
)
3338 if (region
->overlaid
)
3339 return region
->size
;
3341 for (i
= SNOR_ERASE_TYPE_MAX
- 1; i
>= 0; i
--) {
3342 if (region
->erase_mask
& BIT(i
))
3343 return erase_type
[i
].size
;
3349 static int spi_nor_set_mtd_eraseregions(struct spi_nor
*nor
)
3351 const struct spi_nor_erase_map
*map
= &nor
->params
->erase_map
;
3352 const struct spi_nor_erase_region
*region
= map
->regions
;
3353 struct mtd_erase_region_info
*mtd_region
;
3354 struct mtd_info
*mtd
= &nor
->mtd
;
3357 mtd_region
= devm_kcalloc(nor
->dev
, map
->n_regions
, sizeof(*mtd_region
),
3362 for (i
= 0; i
< map
->n_regions
; i
++) {
3363 erasesize
= spi_nor_get_region_erasesize(®ion
[i
],
3368 mtd_region
[i
].erasesize
= erasesize
;
3369 mtd_region
[i
].numblocks
= div_u64(region
[i
].size
, erasesize
);
3370 mtd_region
[i
].offset
= region
[i
].offset
;
3373 mtd
->numeraseregions
= map
->n_regions
;
3374 mtd
->eraseregions
= mtd_region
;
3379 static int spi_nor_set_mtd_info(struct spi_nor
*nor
)
3381 struct mtd_info
*mtd
= &nor
->mtd
;
3382 struct device
*dev
= nor
->dev
;
3384 spi_nor_set_mtd_locking_ops(nor
);
3385 spi_nor_set_mtd_otp_ops(nor
);
3387 mtd
->dev
.parent
= dev
;
3389 mtd
->name
= dev_name(dev
);
3390 mtd
->type
= MTD_NORFLASH
;
3391 mtd
->flags
= MTD_CAP_NORFLASH
;
3392 /* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
3393 if (nor
->flags
& SNOR_F_ECC
)
3394 mtd
->flags
&= ~MTD_BIT_WRITEABLE
;
3395 if (nor
->info
->flags
& SPI_NOR_NO_ERASE
)
3396 mtd
->flags
|= MTD_NO_ERASE
;
3398 mtd
->_erase
= spi_nor_erase
;
3399 mtd
->writesize
= nor
->params
->writesize
;
3400 mtd
->writebufsize
= nor
->params
->page_size
;
3401 mtd
->size
= nor
->params
->size
;
3402 mtd
->_read
= spi_nor_read
;
3403 /* Might be already set by some SST flashes. */
3405 mtd
->_write
= spi_nor_write
;
3406 mtd
->_suspend
= spi_nor_suspend
;
3407 mtd
->_resume
= spi_nor_resume
;
3408 mtd
->_get_device
= spi_nor_get_device
;
3409 mtd
->_put_device
= spi_nor_put_device
;
3411 if (!spi_nor_has_uniform_erase(nor
))
3412 return spi_nor_set_mtd_eraseregions(nor
);
3417 static int spi_nor_hw_reset(struct spi_nor
*nor
)
3419 struct gpio_desc
*reset
;
3421 reset
= devm_gpiod_get_optional(nor
->dev
, "reset", GPIOD_OUT_LOW
);
3422 if (IS_ERR_OR_NULL(reset
))
3423 return PTR_ERR_OR_ZERO(reset
);
3426 * Experimental delay values by looking at different flash device
3427 * vendors datasheets.
3430 gpiod_set_value_cansleep(reset
, 1);
3431 usleep_range(100, 150);
3432 gpiod_set_value_cansleep(reset
, 0);
3433 usleep_range(1000, 1200);
3438 int spi_nor_scan(struct spi_nor
*nor
, const char *name
,
3439 const struct spi_nor_hwcaps
*hwcaps
)
3441 const struct flash_info
*info
;
3442 struct device
*dev
= nor
->dev
;
3445 ret
= spi_nor_check(nor
);
3449 /* Reset SPI protocol for all commands. */
3450 nor
->reg_proto
= SNOR_PROTO_1_1_1
;
3451 nor
->read_proto
= SNOR_PROTO_1_1_1
;
3452 nor
->write_proto
= SNOR_PROTO_1_1_1
;
3455 * We need the bounce buffer early to read/write registers when going
3456 * through the spi-mem layer (buffers have to be DMA-able).
3457 * For spi-mem drivers, we'll reallocate a new buffer if
3458 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
3459 * shouldn't happen before long since NOR pages are usually less
3460 * than 1KB) after spi_nor_scan() returns.
3462 nor
->bouncebuf_size
= PAGE_SIZE
;
3463 nor
->bouncebuf
= devm_kmalloc(dev
, nor
->bouncebuf_size
,
3465 if (!nor
->bouncebuf
)
3468 ret
= spi_nor_hw_reset(nor
);
3472 info
= spi_nor_get_flash_info(nor
, name
);
3474 return PTR_ERR(info
);
3478 mutex_init(&nor
->lock
);
3480 /* Init flash parameters based on flash_info struct and SFDP */
3481 ret
= spi_nor_init_params(nor
);
3485 if (spi_nor_use_parallel_locking(nor
))
3486 init_waitqueue_head(&nor
->rww
.wait
);
3489 * Configure the SPI memory:
3490 * - select op codes for (Fast) Read, Page Program and Sector Erase.
3491 * - set the number of dummy cycles (mode cycles + wait states).
3492 * - set the SPI protocols for register and memory accesses.
3493 * - set the number of address bytes.
3495 ret
= spi_nor_setup(nor
, hwcaps
);
3499 /* Send all the required SPI flash commands to initialize device */
3500 ret
= spi_nor_init(nor
);
3504 /* No mtd_info fields should be used up to this point. */
3505 ret
= spi_nor_set_mtd_info(nor
);
3509 dev_dbg(dev
, "Manufacturer and device ID: %*phN\n",
3510 SPI_NOR_MAX_ID_LEN
, nor
->id
);
3514 EXPORT_SYMBOL_GPL(spi_nor_scan
);
3516 static int spi_nor_create_read_dirmap(struct spi_nor
*nor
)
3518 struct spi_mem_dirmap_info info
= {
3519 .op_tmpl
= SPI_MEM_OP(SPI_MEM_OP_CMD(nor
->read_opcode
, 0),
3520 SPI_MEM_OP_ADDR(nor
->addr_nbytes
, 0, 0),
3521 SPI_MEM_OP_DUMMY(nor
->read_dummy
, 0),
3522 SPI_MEM_OP_DATA_IN(0, NULL
, 0)),
3524 .length
= nor
->params
->size
,
3526 struct spi_mem_op
*op
= &info
.op_tmpl
;
3528 spi_nor_spimem_setup_op(nor
, op
, nor
->read_proto
);
3530 /* convert the dummy cycles to the number of bytes */
3531 op
->dummy
.nbytes
= (nor
->read_dummy
* op
->dummy
.buswidth
) / 8;
3532 if (spi_nor_protocol_is_dtr(nor
->read_proto
))
3533 op
->dummy
.nbytes
*= 2;
3536 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
3537 * of data bytes is non-zero, the data buswidth won't be set here. So,
3540 op
->data
.buswidth
= spi_nor_get_protocol_data_nbits(nor
->read_proto
);
3542 nor
->dirmap
.rdesc
= devm_spi_mem_dirmap_create(nor
->dev
, nor
->spimem
,
3544 return PTR_ERR_OR_ZERO(nor
->dirmap
.rdesc
);
3547 static int spi_nor_create_write_dirmap(struct spi_nor
*nor
)
3549 struct spi_mem_dirmap_info info
= {
3550 .op_tmpl
= SPI_MEM_OP(SPI_MEM_OP_CMD(nor
->program_opcode
, 0),
3551 SPI_MEM_OP_ADDR(nor
->addr_nbytes
, 0, 0),
3552 SPI_MEM_OP_NO_DUMMY
,
3553 SPI_MEM_OP_DATA_OUT(0, NULL
, 0)),
3555 .length
= nor
->params
->size
,
3557 struct spi_mem_op
*op
= &info
.op_tmpl
;
3559 if (nor
->program_opcode
== SPINOR_OP_AAI_WP
&& nor
->sst_write_second
)
3560 op
->addr
.nbytes
= 0;
3562 spi_nor_spimem_setup_op(nor
, op
, nor
->write_proto
);
3565 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
3566 * of data bytes is non-zero, the data buswidth won't be set here. So,
3569 op
->data
.buswidth
= spi_nor_get_protocol_data_nbits(nor
->write_proto
);
3571 nor
->dirmap
.wdesc
= devm_spi_mem_dirmap_create(nor
->dev
, nor
->spimem
,
3573 return PTR_ERR_OR_ZERO(nor
->dirmap
.wdesc
);
3576 static int spi_nor_probe(struct spi_mem
*spimem
)
3578 struct spi_device
*spi
= spimem
->spi
;
3579 struct flash_platform_data
*data
= dev_get_platdata(&spi
->dev
);
3580 struct spi_nor
*nor
;
3582 * Enable all caps by default. The core will mask them after
3583 * checking what's really supported using spi_mem_supports_op().
3585 const struct spi_nor_hwcaps hwcaps
= { .mask
= SNOR_HWCAPS_ALL
};
3589 nor
= devm_kzalloc(&spi
->dev
, sizeof(*nor
), GFP_KERNEL
);
3593 nor
->spimem
= spimem
;
3594 nor
->dev
= &spi
->dev
;
3595 spi_nor_set_flash_node(nor
, spi
->dev
.of_node
);
3597 spi_mem_set_drvdata(spimem
, nor
);
3599 if (data
&& data
->name
)
3600 nor
->mtd
.name
= data
->name
;
3603 nor
->mtd
.name
= spi_mem_get_name(spimem
);
3606 * For some (historical?) reason many platforms provide two different
3607 * names in flash_platform_data: "name" and "type". Quite often name is
3608 * set to "m25p80" and then "type" provides a real chip name.
3609 * If that's the case, respect "type" and ignore a "name".
3611 if (data
&& data
->type
)
3612 flash_name
= data
->type
;
3613 else if (!strcmp(spi
->modalias
, "spi-nor"))
3614 flash_name
= NULL
; /* auto-detect */
3616 flash_name
= spi
->modalias
;
3618 ret
= spi_nor_scan(nor
, flash_name
, &hwcaps
);
3622 spi_nor_debugfs_register(nor
);
3625 * None of the existing parts have > 512B pages, but let's play safe
3626 * and add this logic so that if anyone ever adds support for such
3627 * a NOR we don't end up with buffer overflows.
3629 if (nor
->params
->page_size
> PAGE_SIZE
) {
3630 nor
->bouncebuf_size
= nor
->params
->page_size
;
3631 devm_kfree(nor
->dev
, nor
->bouncebuf
);
3632 nor
->bouncebuf
= devm_kmalloc(nor
->dev
,
3633 nor
->bouncebuf_size
,
3635 if (!nor
->bouncebuf
)
3639 ret
= spi_nor_create_read_dirmap(nor
);
3643 ret
= spi_nor_create_write_dirmap(nor
);
3647 return mtd_device_register(&nor
->mtd
, data
? data
->parts
: NULL
,
3648 data
? data
->nr_parts
: 0);
3651 static int spi_nor_remove(struct spi_mem
*spimem
)
3653 struct spi_nor
*nor
= spi_mem_get_drvdata(spimem
);
3655 spi_nor_restore(nor
);
3657 /* Clean up MTD stuff. */
3658 return mtd_device_unregister(&nor
->mtd
);
/* spi-mem shutdown: leave the flash in a state the next boot can use. */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}
3669 * Do NOT add to this array without reading the following:
3671 * Historically, many flash devices are bound to this driver by their name. But
3672 * since most of these flash are compatible to some extent, and their
3673 * differences can often be differentiated by the JEDEC read-ID command, we
3674 * encourage new users to add support to the spi-nor library, and simply bind
3675 * against a generic string here (e.g., "jedec,spi-nor").
3677 * Many flash names are kept here in this list to keep them available
3678 * as module aliases for existing platforms.
3680 static const struct spi_device_id spi_nor_dev_ids
[] = {
3682 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
3683 * hack around the fact that the SPI core does not provide uevent
3684 * matching for .of_match_table
3689 * Entries not used in DTs that should be safe to drop after replacing
3690 * them with "spi-nor" in platform data.
3692 {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"},
3695 * Entries that were used in DTs without "jedec,spi-nor" fallback and
3696 * should be kept for backward compatibility.
3698 {"at25df321a"}, {"at25df641"}, {"at26df081a"},
3699 {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"},
3700 {"mx25l25635e"},{"mx66l51235l"},
3701 {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"},
3702 {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"},
3704 {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
3705 {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"},
3706 {"m25p64"}, {"m25p128"},
3707 {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
3708 {"w25q80bl"}, {"w25q128"}, {"w25q256"},
3710 /* Flashes that can't be detected using JEDEC */
3711 {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
3712 {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
3713 {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
3715 /* Everspin MRAMs (non-JEDEC) */
3716 { "mr25h128" }, /* 128 Kib, 40 MHz */
3717 { "mr25h256" }, /* 256 Kib, 40 MHz */
3718 { "mr25h10" }, /* 1 Mib, 40 MHz */
3719 { "mr25h40" }, /* 4 Mib, 40 MHz */
3723 MODULE_DEVICE_TABLE(spi
, spi_nor_dev_ids
);
3725 static const struct of_device_id spi_nor_of_table
[] = {
3727 * Generic compatibility for SPI NOR that can be identified by the
3728 * JEDEC READ ID opcode (0x9F). Use this, if possible.
3730 { .compatible
= "jedec,spi-nor" },
3733 MODULE_DEVICE_TABLE(of
, spi_nor_of_table
);
3736 * REVISIT: many of these chips have deep power-down modes, which
3737 * should clearly be entered on suspend() to minimize power use.
3738 * And also when they're otherwise idle...
3740 static struct spi_mem_driver spi_nor_driver
= {
3744 .of_match_table
= spi_nor_of_table
,
3745 .dev_groups
= spi_nor_sysfs_groups
,
3747 .id_table
= spi_nor_dev_ids
,
3749 .probe
= spi_nor_probe
,
3750 .remove
= spi_nor_remove
,
3751 .shutdown
= spi_nor_shutdown
,
3754 static int __init
spi_nor_module_init(void)
3756 return spi_mem_driver_register(&spi_nor_driver
);
3758 module_init(spi_nor_module_init
);
3760 static void __exit
spi_nor_module_exit(void)
3762 spi_mem_driver_unregister(&spi_nor_driver
);
3763 spi_nor_debugfs_shutdown();
3765 module_exit(spi_nor_module_exit
);
3767 MODULE_LICENSE("GPL v2");
3768 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
3769 MODULE_AUTHOR("Mike Lavender");
3770 MODULE_DESCRIPTION("framework for SPI NOR");