// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * LPDDR flash memory device operations. This module provides read, write,
 * erase, lock/unlock support for LPDDR flash memories
 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
 * Many thanks to Roman Borisov for initial enabling
 *
 * TODO:
 * Implement VPP management
 * Implement XIP support
 * Implement OTP support
 */
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
#include <linux/module.h>

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
			size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys);
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);

struct mtd_info *lpddr_cmdset(struct map_info *map)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip_shared *shared;
	struct flchip *chip;
	struct mtd_info *mtd;
	int numchips;
	int i, j;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_read = lpddr_read;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->flags &= ~MTD_BIT_WRITEABLE;
	mtd->_erase = lpddr_erase;
	mtd->_write = lpddr_write_buffers;
	mtd->_writev = lpddr_writev;
	mtd->_lock = lpddr_lock;
	mtd->_unlock = lpddr_unlock;
	if (map_is_linear(map)) {
		mtd->_point = lpddr_point;
		mtd->_unpoint = lpddr_unpoint;
	}
	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;

	shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
						GFP_KERNEL);
	if (!shared) {
		kfree(lpddr);
		kfree(mtd);
		return NULL;
	}

	chip = &lpddr->chips[0];
	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
	for (i = 0; i < numchips; i++) {
		shared[i].writing = shared[i].erasing = NULL;
		mutex_init(&shared[i].lock);
		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
			*chip = lpddr->chips[i];
			chip->start += j << lpddr->chipshift;
			chip->oldstate = chip->state = FL_READY;
			chip->priv = &shared[i];
			/* those should be reset too since
			   they create memory references. */
			init_waitqueue_head(&chip->wq);
			mutex_init(&chip->mutex);
			chip++;
		}
	}

	return mtd;
}
EXPORT_SYMBOL(lpddr_cmdset);

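/*
 * Poll the PFOW Device Status Register until the current operation
 * completes or the timeout (eight times the expected operation time)
 * expires.  Long waits sleep instead of busy-waiting, and the timeout
 * is restarted if the operation was suspended and resumed in between.
 */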
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d \n",
							map->name, chip_state);
			ret = -ETIMEDOUT;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* Suspend has occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR */
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING "%s: Bad status on wait: 0x%x \n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}

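/*
 * Acquire the right to start an operation on this partition.  Write and
 * erase operations are global to the physical chip, so they are
 * arbitrated between partitions through the shared flchip_shared state.
 */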
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition. So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;

		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it. If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep. In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already in FL_SYNCING
			 * state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

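/*
 * Bring the chip into a state where the requested operation can start.
 * A running erase is suspended if the hardware supports it and the
 * caller only wants to read or point; otherwise the caller is put to
 * sleep and asked to retry via -EAGAIN.
 */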
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. Something went wrong. */
			/* Resume and pretend we weren't here. */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed. "
					"State may be wrong \n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}

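/*
 * Release the chip after an operation: hand write/erase ownership back
 * to the partition it was borrowed from, resume a previously suspended
 * erase if we still own it, and wake up anyone waiting on the chip.
 */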
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;

		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;

				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

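/*
 * Fill the device program buffer from the caller's kvec array, padding
 * bank-width gaps at the start and end with 0xff words, then issue the
 * buffered program command and wait for it to finish.
 */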
static int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first */
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret) {
		printk(KERN_WARNING "%s Buffer program error: %d at %lx; \n",
			map->name, ret, adr);
		goto out;
	}

 out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
	chip->state = FL_ERASING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
	if (ret) {
		printk(KERN_WARNING "%s Erase block error %d at : %llx\n",
			map->name, ret, adr);
		goto out;
	}
 out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	map_copy_from(map, buf, adr, len);
	*retlen = len;

	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

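/*
 * Direct-mapped access: hand back a pointer into the flash mapping so
 * the caller can read in place.  Every chip covered by the request is
 * moved to FL_POINT and its reference counter is bumped; lpddr_unpoint()
 * reverses this.
 */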
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt)
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);
	*mtdbuf = (void *)map->virt + chip->start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}

static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift, err = 0;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &lpddr->chips[chipnum];
		if (chipnum >= lpddr->numchips)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_WARNING "%s: Warning: unpoint called on non-"
					"pointed region\n", map->name);

		put_chip(map, chip);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}

static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return lpddr_writev(mtd, &vec, 1, to, retlen);
}

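/*
 * Split the write into chunks that never cross a program-buffer
 * boundary and feed them to do_write_buffer() one chunk at a time.
 */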
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	chipnum = to >> lpddr->chipshift;

	ofs = to;
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &lpddr->chips[chipnum],
					  ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();

	} while (len);

	return 0;
}

static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;

	ofs = instr->addr;
	len = instr->len;

	while (len > 0) {
		ret = do_erase_oneblock(mtd, ofs);
		if (ret)
			return ret;
		ofs += size;
		len -= size;
	}

	return 0;
}

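/*
 * Common helper for lpddr_lock()/lpddr_unlock(): the 'thunk' argument
 * selects whether the PFOW lock or unlock command is sent for the
 * given address range.
 */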
#define DO_XXLOCK_LOCK		1
#define DO_XXLOCK_UNLOCK	2
static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
	int ret = 0;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (thunk == DO_XXLOCK_LOCK) {
		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_UNLOCK) {
		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	ret = wait_for_ready(map, chip, 1);
	if (ret) {
		printk(KERN_ERR "%s: block unlock error status %d \n",
				map->name, ret);
		goto out;
	}
out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}

static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");