/*
 * LPDDR flash memory device operations. This module provides read, write,
 * erase, lock/unlock support for LPDDR flash memories
 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
 * Many thanks to Roman Borisov for initial enabling
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 * TODO:
 * Implement VPP management
 * Implement XIP support
 * Implement OTP support
 */

#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
#include <linux/module.h>

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
			size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys);
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);
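
/*
 * lpddr_cmdset() - build the MTD interface for a probed PFOW/LPDDR chip:
 * allocate the mtd_info, wire in the operations below and split the
 * per-chip state into hardware partitions that each share one
 * flchip_shared arbiter.
 *
 * Minimal usage sketch (the lpddr_probe_chip() helper named here is
 * hypothetical and only stands in for whatever probe code fills in
 * map->fldrv_priv):
 *
 *	struct mtd_info *mtd;
 *
 *	map->fldrv_priv = lpddr_probe_chip(map);
 *	mtd = lpddr_cmdset(map);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 */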
struct mtd_info *lpddr_cmdset(struct map_info *map)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	struct flchip_shared *shared;
	struct flchip *chip;
	struct mtd_info *mtd;
	int numchips;
	int i, j;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;

	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_read = lpddr_read;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->flags &= ~MTD_BIT_WRITEABLE;
	mtd->_erase = lpddr_erase;
	mtd->_write = lpddr_write_buffers;
	mtd->_writev = lpddr_writev;
	mtd->_lock = lpddr_lock;
	mtd->_unlock = lpddr_unlock;
	if (map_is_linear(map)) {
		mtd->_point = lpddr_point;
		mtd->_unpoint = lpddr_unpoint;
	}
	mtd->size = 1 << lpddr->qinfo->DevSizeShift;
	mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
	mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;

	shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
						GFP_KERNEL);
	if (!shared) {
		kfree(mtd);
		return NULL;
	}

	chip = &lpddr->chips[0];
	numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
	for (i = 0; i < numchips; i++) {
		shared[i].writing = shared[i].erasing = NULL;
		mutex_init(&shared[i].lock);
		for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
			*chip = lpddr->chips[i];
			chip->start += j << lpddr->chipshift;
			chip->oldstate = chip->state = FL_READY;
			chip->priv = &shared[i];
			/* those should be reset too since
			   they create memory references. */
			init_waitqueue_head(&chip->wq);
			mutex_init(&chip->mutex);
			chip++;
		}
	}

	return mtd;
}
EXPORT_SYMBOL(lpddr_cmdset);
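
/*
 * Poll the PFOW Device Status Register until the chip reports ready,
 * timing out after eight times the expected operation time.  The chip
 * mutex is dropped while sleeping between polls, and the timeout is
 * restarted if the operation was suspended and resumed in the meantime.
 */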
static int wait_for_ready(struct map_info *map, struct flchip *chip,
			unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d\n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* Suspend has occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING "%s: Bad status on wait: 0x%x\n",
							map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}
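
/*
 * Acquire the chip (hardware partition) for the given operation mode.
 * Write and erase engines are shared across the partitions of one die,
 * so ownership is arbitrated through the flchip_shared structure in
 * chip->priv; see the comments below for the rules.  Must be called
 * with chip->mutex held.
 */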
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have the possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition. So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;

		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform the desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it. If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep. In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own the chip if it is already in
			 * FL_SYNCING state. Put the contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have a suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
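
/*
 * Make the chip ready for the requested mode: return immediately if it
 * is idle, suspend an in-progress erase when the hardware supports it
 * and the caller only wants to read or point, or go to sleep and ask
 * the caller to retry with -EAGAIN.  Must be called with chip->mutex held.
 */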
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. Something went wrong. */
			/* Resume and pretend we weren't here. */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed. "
					"State may be wrong\n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
	default:
sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
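
/*
 * Release the chip after an operation: return ownership of the shared
 * write/erase engine, resume a suspended erase if we are responsible
 * for it, and wake up anyone waiting on the chip.  Must be called with
 * chip->mutex held.
 */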
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;

		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;

				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
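
/*
 * Program one chunk of data at 'adr' through the chip's internal program
 * buffer.  The payload is gathered from the kvec array (*pvec, starting
 * at *pvec_seek), padded to bank width with 0xff where needed, copied
 * word by word into the program buffer and committed with
 * LPDDR_BUFF_PROGRAM.  Both cursors are advanced for the caller, which
 * guarantees that 'len' never crosses a write-buffer boundary.
 */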
static int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Get the program buffer offset from PFOW register data first */
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret) {
		printk(KERN_WARNING "%s: Buffer program error: %d at %lx\n",
			map->name, ret, adr);
		goto out;
	}

out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
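
/* Erase the single block at 'adr' and wait for the chip to become ready again. */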
static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
	chip->state = FL_ERASING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
	if (ret) {
		printk(KERN_WARNING "%s: Erase block error %d at %llx\n",
			map->name, ret, adr);
		goto out;
	}
out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
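
/* MTD read: take the chip in FL_READY state and copy directly from the mapping. */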
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	map_copy_from(map, buf, adr, len);
	*retlen = len;

	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
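
/*
 * MTD point: return a direct pointer into the memory-mapped flash so the
 * caller can read in place.  The walk refuses to cross chips that are not
 * virtually contiguous, and each pointed chip gets its ref_point_counter
 * bumped so lpddr_unpoint() knows when it may drop back to FL_READY.
 */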
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt)
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);
	*mtdbuf = (void *)map->virt + chip->start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
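
/* MTD unpoint: drop the references taken by lpddr_point() and release the chips. */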
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift, err = 0;
	struct flchip *chip;
	unsigned long ofs;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	while (len) {
		unsigned long thislen;

		chip = &lpddr->chips[chipnum];
		if (chipnum >= lpddr->numchips)
			break;

		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_WARNING "%s: Warning: unpoint called on "
					"non-pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}
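
/* MTD write: wrap the flat buffer in a single kvec and reuse lpddr_writev(). */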
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return lpddr_writev(mtd, &vec, 1, to, retlen);
}
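
/*
 * MTD writev: split the scatter-gather payload into chunks that never
 * cross a write-buffer boundary and hand each chunk to do_write_buffer().
 */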
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	*retlen = 0;

	chipnum = to >> lpddr->chipshift;
	ofs = to;
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;

		ret = do_write_buffer(map, &lpddr->chips[chipnum],
					  ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		*retlen += size;
		len -= size;

		/* Be nice and reschedule with the chip in a usable
		 * state for other processes */
		cond_resched();

	} while (len);

	return 0;
}
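
/* MTD erase: erase the range block by block via do_erase_oneblock(). */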
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int size = 1 << lpddr->qinfo->UniformBlockSizeShift;

	ofs = instr->addr;
	len = instr->len;

	while (len > 0) {
		ret = do_erase_oneblock(mtd, ofs);
		if (ret)
			return ret;
		ofs += size;
		len -= size;
	}
	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
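
/*
 * Common helper for lpddr_lock()/lpddr_unlock(): issue LPDDR_LOCK_BLOCK or
 * LPDDR_UNLOCK_BLOCK for the [adr, adr + len) range, selected by 'thunk'.
 */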
#define DO_XXLOCK_LOCK		1
#define DO_XXLOCK_UNLOCK	2
static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (thunk == DO_XXLOCK_LOCK) {
		send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_UNLOCK) {
		send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	ret = wait_for_ready(map, chip, 1);
	if (ret) {
		printk(KERN_ERR "%s: block lock/unlock error status %d\n",
				map->name, ret);
		goto out;
	}
out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}

static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");