/*
 * LPDDR flash memory device operations. This module provides read, write,
 * erase, lock/unlock support for LPDDR flash memories
 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
 * Many thanks to Roman Borisov for initial enabling
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * TODO:
 * Implement VPP management
 * Implement XIP support
 * Implement OTP support
 */
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
#include <linux/module.h>
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
                        size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
                        size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
                        unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
                        size_t *retlen, void **mtdbuf, resource_size_t *phys);
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);
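
/*
 * lpddr_cmdset() builds the mtd_info for a PFOW-capable LPDDR chip: it fills
 * in the default operations, derives size/erasesize/writesize from the qinfo
 * fields, and splits each physical chip into HWPartsNum partitions that share
 * one flchip_shared arbiter for write/erase contention.
 */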
struct mtd_info *lpddr_cmdset(struct map_info *map)
{
        struct lpddr_private *lpddr = map->fldrv_priv;
        struct flchip_shared *shared;
        struct flchip *chip;
        struct mtd_info *mtd;
        int numchips;
        int i, j;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;

        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_read = lpddr_read;
        mtd->flags = MTD_CAP_NORFLASH;
        mtd->flags &= ~MTD_BIT_WRITEABLE;
        mtd->_erase = lpddr_erase;
        mtd->_write = lpddr_write_buffers;
        mtd->_writev = lpddr_writev;
        mtd->_lock = lpddr_lock;
        mtd->_unlock = lpddr_unlock;
        if (map_is_linear(map)) {
                mtd->_point = lpddr_point;
                mtd->_unpoint = lpddr_unpoint;
        }
        mtd->size = 1 << lpddr->qinfo->DevSizeShift;
        mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
        mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;

        shared = kmalloc_array(lpddr->numchips, sizeof(struct flchip_shared),
                                GFP_KERNEL);
        if (!shared) {
                kfree(mtd);
                return NULL;
        }

        chip = &lpddr->chips[0];
        numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
        for (i = 0; i < numchips; i++) {
                shared[i].writing = shared[i].erasing = NULL;
                mutex_init(&shared[i].lock);
                for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
                        *chip = lpddr->chips[i];
                        chip->start += j << lpddr->chipshift;
                        chip->oldstate = chip->state = FL_READY;
                        chip->priv = &shared[i];
                        /* those should be reset too since
                           they create memory references. */
                        init_waitqueue_head(&chip->wq);
                        mutex_init(&chip->mutex);
                        chip++;
                }
        }

        return mtd;
}
EXPORT_SYMBOL(lpddr_cmdset);
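
/*
 * Poll the PFOW Device Status Register until the chip reports ready, up to
 * eight times the expected operation time. Long waits sleep via msleep();
 * if the operation gets suspended in the meantime, the timeout is reset.
 * Any error bits left in the DSR are reported and cleared before returning.
 */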
static int wait_for_ready(struct map_info *map, struct flchip *chip,
                unsigned int chip_op_time)
{
        unsigned int timeo, reset_timeo, sleep_time;
        unsigned int dsr;
        flstate_t chip_state = chip->state;
        int ret = 0;

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        reset_timeo = timeo;
        sleep_time = chip_op_time / 2;

        for (;;) {
                dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
                if (dsr & DSR_READY_STATUS)
                        break;
                if (!timeo) {
                        printk(KERN_ERR "%s: Flash timeout error state %d\n",
                                                        map->name, chip_state);
                        ret = -ETIME;
                        break;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                mutex_unlock(&chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                mutex_lock(&chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                }
                if (chip->erase_suspended || chip->write_suspended) {
                        /* Suspend has occurred while sleep: reset timeout */
                        timeo = reset_timeo;
                        chip->erase_suspended = chip->write_suspended = 0;
                }
        }
        /* check status for errors */
        if (dsr & DSR_ERR) {
                /* Clear DSR */
                map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
                printk(KERN_WARNING "%s: Bad status on wait: 0x%x\n",
                                map->name, dsr);
                print_drs_error(dsr);
                ret = -EIO;
        }
        chip->state = FL_READY;
        return ret;
}
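
/*
 * Acquire the right to run an operation in 'mode' on this partition. Called
 * with chip->mutex held; may drop and retake it while arbitrating with
 * whichever partition currently owns the shared write/erase engine.
 */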
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
                && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition. So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;

                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it. If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep. In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already in FL_SYNCING
                         * state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we have suspended erase on this chip.
                   Must sleep in such a case. */
                if (mode == FL_ERASING && shared->erasing
                        && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }

        ret = chip_ready(map, chip, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}
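
/*
 * Check whether the chip can enter the requested mode right away. A running
 * erase may be suspended (when the part advertises SuspEraseSupp) so that
 * reads can proceed; otherwise the caller is put to sleep on chip->wq and
 * told to retry via -EAGAIN.
 */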
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
        struct lpddr_private *lpddr = map->fldrv_priv;
        int ret = 0;
        DECLARE_WAITQUEUE(wait, current);

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (FL_SYNCING == mode && FL_READY != chip->oldstate)
                goto sleep;

        switch (chip->state) {
        case FL_READY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!lpddr->qinfo->SuspEraseSupp ||
                        !(mode == FL_READY || mode == FL_POINT))
                        goto sleep;

                map_write(map, CMD(LPDDR_SUSPEND),
                        map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                ret = wait_for_ready(map, chip, 0);
                if (ret) {
                        /* Oops. Something went wrong. */
                        /* Resume and pretend we weren't here. */
                        put_chip(map, chip);
                        printk(KERN_ERR "%s: suspend operation failed. "
                                        "State may be wrong\n", map->name);
                        return -EIO;
                }
                chip->erase_suspended = 1;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                fallthrough;
        default:
sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}
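
/*
 * Release the partition after an operation. Ownership of the shared
 * write/erase engine is handed back to a suspended owner when there is one;
 * otherwise a suspended erase is resumed via the PFOW RESUME command and
 * anyone sleeping on chip->wq is woken up.
 */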
static void put_chip(struct map_info *map, struct flchip *chip)
{
        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;

                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back the ownership */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                map_write(map, CMD(LPDDR_RESUME),
                                map->pfow_base + PFOW_COMMAND_CODE);
                map_write(map, CMD(LPDDR_START_EXECUTION),
                                map->pfow_base + PFOW_COMMAND_EXECUTE);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;
        case FL_READY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
                                map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}
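
/*
 * Fill the chip's program buffer from the caller's kvec array and issue one
 * LPDDR_BUFF_PROGRAM command. Writes that do not start on a bank-width
 * boundary are padded with 0xff (word_gap) so only full words reach the
 * buffer, and *pvec/*pvec_seek are advanced for the next chunk.
 */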
static int do_write_buffer(struct map_info *map, struct flchip *chip,
                        unsigned long adr, const struct kvec **pvec,
                        unsigned long *pvec_seek, int len)
{
        struct lpddr_private *lpddr = map->fldrv_priv;
        map_word datum;
        int ret, wbufsize, word_gap, words;
        const struct kvec *vec;
        unsigned long vec_seek;
        unsigned long prog_buf_ofs;

        wbufsize = 1 << lpddr->qinfo->BufSizeShift;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }
        /* Figure out the number of words to write */
        word_gap = (-adr & (map_bankwidth(map)-1));
        words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
        if (!word_gap) {
                words--;
        } else {
                word_gap = map_bankwidth(map) - word_gap;
                adr -= word_gap;
                datum = map_word_ff(map);
        }
        /* Get the program buffer offset from PFOW register data first */
        prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
                        map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
        vec = *pvec;
        vec_seek = *pvec_seek;
        do {
                int n = map_bankwidth(map) - word_gap;

                if (n > vec->iov_len - vec_seek)
                        n = vec->iov_len - vec_seek;
                if (n > len)
                        n = len;

                if (!word_gap && (len < map_bankwidth(map)))
                        datum = map_word_ff(map);

                datum = map_word_load_partial(map, datum,
                                vec->iov_base + vec_seek, word_gap, n);

                len -= n;
                word_gap += n;
                if (!len || word_gap == map_bankwidth(map)) {
                        map_write(map, datum, prog_buf_ofs);
                        prog_buf_ofs += map_bankwidth(map);
                        word_gap = 0;
                }

                vec_seek += n;
                if (vec_seek == vec->iov_len) {
                        vec++;
                        vec_seek = 0;
                }
        } while (len);
        *pvec = vec;
        *pvec_seek = vec_seek;

        send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
        chip->state = FL_WRITING;
        ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
        if (ret) {
                printk(KERN_WARNING "%s Buffer program error: %d at %lx\n",
                        map->name, ret, adr);
                goto out;
        }

out:    put_chip(map, chip);
        mutex_unlock(&chip->mutex);
        return ret;
}
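
/*
 * Erase a single block: grab the chip for FL_ERASING, send the PFOW block
 * erase command and wait for completion. BlockEraseTime is scaled by 1000,
 * matching the microsecond-based delays used in wait_for_ready().
 */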
static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift;
        struct flchip *chip = &lpddr->chips[chipnum];
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, FL_ERASING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }
        send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
        chip->state = FL_ERASING;
        ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
        if (ret) {
                printk(KERN_WARNING "%s Erase block error %d at : %llx\n",
                        map->name, ret, adr);
                goto out;
        }
out:    put_chip(map, chip);
        mutex_unlock(&chip->mutex);
        return ret;
}
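
/*
 * Read by copying straight from the memory-mapped window once the chip is
 * idle (FL_READY); no PFOW command is needed for reads.
 */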
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift;
        struct flchip *chip = &lpddr->chips[chipnum];
        int ret = 0;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        map_copy_from(map, buf, adr, len);
        *retlen = len;

        put_chip(map, chip);
        mutex_unlock(&chip->mutex);
        return ret;
}
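
/*
 * mtd point(): hand back a direct pointer into the linear mapping for the
 * requested range. The range may span several chips as long as their
 * mappings are virtually contiguous; each chip touched is moved to FL_POINT
 * and its ref_point_counter is bumped so lpddr_unpoint() can release it.
 */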
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
                        size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift;
        unsigned long ofs, last_end = 0;
        struct flchip *chip = &lpddr->chips[chipnum];
        int ret = 0;

        if (!map->virt)
                return -EINVAL;

        /* ofs: offset within the first chip that the first read should start */
        ofs = adr - (chipnum << lpddr->chipshift);
        *mtdbuf = (void *)map->virt + chip->start + ofs;
        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= lpddr->numchips)
                        break;

                /* We cannot point across chips that are virtually disjoint */
                if (!last_end)
                        last_end = chip->start;
                else if (chip->start != last_end)
                        break;

                if ((len + ofs - 1) >> lpddr->chipshift)
                        thislen = (1<<lpddr->chipshift) - ofs;
                else
                        thislen = len;

                /* get the chip */
                mutex_lock(&chip->mutex);
                ret = get_chip(map, chip, FL_POINT);
                mutex_unlock(&chip->mutex);
                if (ret)
                        break;

                chip->state = FL_POINT;
                chip->ref_point_counter++;
                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                last_end += 1 << lpddr->chipshift;
                chipnum++;
                chip = &lpddr->chips[chipnum];
        }
        return 0;
}
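
/*
 * Undo lpddr_point(): drop one reference per chip in the range and return
 * the chip to FL_READY once the last reference is gone. Unpointing a region
 * that was never pointed is reported and yields an error.
 */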
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift, err = 0;
        unsigned long ofs;

        /* ofs: offset within the first chip that the first read should start */
        ofs = adr - (chipnum << lpddr->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                chip = &lpddr->chips[chipnum];
                if (chipnum >= lpddr->numchips)
                        break;

                if ((len + ofs - 1) >> lpddr->chipshift)
                        thislen = (1<<lpddr->chipshift) - ofs;
                else
                        thislen = len;

                mutex_lock(&chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else {
                        printk(KERN_WARNING "%s: Warning: unpoint called on "
                                        "non-pointed region\n", map->name);
                        err = -EINVAL;
                }

                put_chip(map, chip);
                mutex_unlock(&chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }

        return err;
}
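
/*
 * mtd write(): wrap the flat buffer in a single kvec and reuse the
 * vector-write path.
 */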
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
                                size_t *retlen, const u_char *buf)
{
        struct kvec vec;

        vec.iov_base = (void *) buf;
        vec.iov_len = len;

        return lpddr_writev(mtd, &vec, 1, to, retlen);
}
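
/*
 * mtd writev(): walk the kvec array and feed do_write_buffer() one
 * write-buffer-sized, boundary-aligned chunk at a time, rescheduling between
 * chunks so other users can get at the chip.
 */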
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
                                unsigned long count, loff_t to, size_t *retlen)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs, vec_seek, i;
        int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
        size_t len = 0;

        for (i = 0; i < count; i++)
                len += vecs[i].iov_len;

        if (!len)
                return 0;

        chipnum = to >> lpddr->chipshift;

        ofs = to;
        vec_seek = 0;

        do {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len;

                ret = do_write_buffer(map, &lpddr->chips[chipnum],
                                        ofs, &vecs, &vec_seek, size);
                if (ret)
                        return ret;

                ofs += size;
                (*retlen) += size;
                len -= size;

                /* Be nice and reschedule with the chip in a usable
                 * state for other processes */
                cond_resched();

        } while (len);

        return 0;
}
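
/*
 * mtd erase(): walk the requested range one uniform block at a time and
 * erase each block with do_erase_oneblock().
 */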
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        unsigned long ofs, len;
        int ret;
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int size = 1 << lpddr->qinfo->UniformBlockSizeShift;

        ofs = instr->addr;
        len = instr->len;

        while (len > 0) {
                ret = do_erase_oneblock(mtd, ofs);
                if (ret)
                        return ret;
                ofs += size;
                len -= size;
        }

        return 0;
}
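
/*
 * Common helper for lpddr_lock()/lpddr_unlock(): the PFOW LOCK/UNLOCK block
 * command takes the start and end addresses of the range to (un)protect.
 */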
#define DO_XXLOCK_LOCK          1
#define DO_XXLOCK_UNLOCK        2
static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
        int ret = 0;
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift;
        struct flchip *chip = &lpddr->chips[chipnum];

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, FL_LOCKING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        if (thunk == DO_XXLOCK_LOCK) {
                send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
                chip->state = FL_LOCKING;
        } else if (thunk == DO_XXLOCK_UNLOCK) {
                send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
                chip->state = FL_UNLOCKING;
        } else
                BUG();

        ret = wait_for_ready(map, chip, 1);
        if (ret) {
                printk(KERN_ERR "%s: block lock/unlock error status %d\n",
                                map->name, ret);
                goto out;
        }
out:    put_chip(map, chip);
        mutex_unlock(&chip->mutex);
        return ret;
}
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}

static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");