/*
 * Common Flash Interface support:
 * ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0020.c,v 1.17 2004/11/20 12:49:04 dwmw2 Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 06/21/2002	Joern Engel <joern@wh.fh-wedel.de> and others
 *	- modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *	  (command set 0x0020)
 *	- added a writev function
 */
21 #include <linux/version.h>
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
28 #include <asm/byteorder.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/cfi.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/compatmac.h>
40 static int cfi_staa_read(struct mtd_info
*, loff_t
, size_t, size_t *, u_char
*);
41 static int cfi_staa_write_buffers(struct mtd_info
*, loff_t
, size_t, size_t *, const u_char
*);
42 static int cfi_staa_writev(struct mtd_info
*mtd
, const struct kvec
*vecs
,
43 unsigned long count
, loff_t to
, size_t *retlen
);
44 static int cfi_staa_erase_varsize(struct mtd_info
*, struct erase_info
*);
45 static void cfi_staa_sync (struct mtd_info
*);
46 static int cfi_staa_lock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
);
47 static int cfi_staa_unlock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
);
48 static int cfi_staa_suspend (struct mtd_info
*);
49 static void cfi_staa_resume (struct mtd_info
*);
51 static void cfi_staa_destroy(struct mtd_info
*);
53 struct mtd_info
*cfi_cmdset_0020(struct map_info
*, int);
55 static struct mtd_info
*cfi_staa_setup (struct map_info
*);
57 static struct mtd_chip_driver cfi_staa_chipdrv
= {
58 .probe
= NULL
, /* Not usable directly */
59 .destroy
= cfi_staa_destroy
,
60 .name
= "cfi_cmdset_0020",
64 /* #define DEBUG_LOCK_BITS */
65 //#define DEBUG_CFI_FEATURES
67 #ifdef DEBUG_CFI_FEATURES
68 static void cfi_tell_features(struct cfi_pri_intelext
*extp
)
71 printk(" Feature/Command Support: %4.4X\n", extp
->FeatureSupport
);
72 printk(" - Chip Erase: %s\n", extp
->FeatureSupport
&1?"supported":"unsupported");
73 printk(" - Suspend Erase: %s\n", extp
->FeatureSupport
&2?"supported":"unsupported");
74 printk(" - Suspend Program: %s\n", extp
->FeatureSupport
&4?"supported":"unsupported");
75 printk(" - Legacy Lock/Unlock: %s\n", extp
->FeatureSupport
&8?"supported":"unsupported");
76 printk(" - Queued Erase: %s\n", extp
->FeatureSupport
&16?"supported":"unsupported");
77 printk(" - Instant block lock: %s\n", extp
->FeatureSupport
&32?"supported":"unsupported");
78 printk(" - Protection Bits: %s\n", extp
->FeatureSupport
&64?"supported":"unsupported");
79 printk(" - Page-mode read: %s\n", extp
->FeatureSupport
&128?"supported":"unsupported");
80 printk(" - Synchronous read: %s\n", extp
->FeatureSupport
&256?"supported":"unsupported");
81 for (i
=9; i
<32; i
++) {
82 if (extp
->FeatureSupport
& (1<<i
))
83 printk(" - Unknown Bit %X: supported\n", i
);
86 printk(" Supported functions after Suspend: %2.2X\n", extp
->SuspendCmdSupport
);
87 printk(" - Program after Erase Suspend: %s\n", extp
->SuspendCmdSupport
&1?"supported":"unsupported");
89 if (extp
->SuspendCmdSupport
& (1<<i
))
90 printk(" - Unknown Bit %X: supported\n", i
);
93 printk(" Block Status Register Mask: %4.4X\n", extp
->BlkStatusRegMask
);
94 printk(" - Lock Bit Active: %s\n", extp
->BlkStatusRegMask
&1?"yes":"no");
95 printk(" - Valid Bit Active: %s\n", extp
->BlkStatusRegMask
&2?"yes":"no");
96 for (i
=2; i
<16; i
++) {
97 if (extp
->BlkStatusRegMask
& (1<<i
))
98 printk(" - Unknown Bit %X Active: yes\n",i
);
101 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
102 extp
->VccOptimal
>> 8, extp
->VccOptimal
& 0xf);
103 if (extp
->VppOptimal
)
104 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
105 extp
->VppOptimal
>> 8, extp
->VppOptimal
& 0xf);
109 /* This routine is made available to other mtd code via
110 * inter_module_register. It must only be accessed through
111 * inter_module_get which will bump the use count of this module. The
112 * addresses passed back in cfi are valid as long as the use count of
113 * this module is non-zero, i.e. between inter_module_get and
114 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
116 struct mtd_info
*cfi_cmdset_0020(struct map_info
*map
, int primary
)
118 struct cfi_private
*cfi
= map
->fldrv_priv
;
123 * It's a real CFI chip, not one for which the probe
124 * routine faked a CFI structure. So we read the feature
127 __u16 adr
= primary
?cfi
->cfiq
->P_ADR
:cfi
->cfiq
->A_ADR
;
128 struct cfi_pri_intelext
*extp
;
130 extp
= (struct cfi_pri_intelext
*)cfi_read_pri(map
, adr
, sizeof(*extp
), "ST Microelectronics");
134 /* Do some byteswapping if necessary */
135 extp
->FeatureSupport
= cfi32_to_cpu(extp
->FeatureSupport
);
136 extp
->BlkStatusRegMask
= cfi32_to_cpu(extp
->BlkStatusRegMask
);
138 #ifdef DEBUG_CFI_FEATURES
139 /* Tell the user about it in lots of lovely detail */
140 cfi_tell_features(extp
);
143 /* Install our own private info structure */
144 cfi
->cmdset_priv
= extp
;
147 for (i
=0; i
< cfi
->numchips
; i
++) {
148 cfi
->chips
[i
].word_write_time
= 128;
149 cfi
->chips
[i
].buffer_write_time
= 128;
150 cfi
->chips
[i
].erase_time
= 1024;
153 return cfi_staa_setup(map
);
156 static struct mtd_info
*cfi_staa_setup(struct map_info
*map
)
158 struct cfi_private
*cfi
= map
->fldrv_priv
;
159 struct mtd_info
*mtd
;
160 unsigned long offset
= 0;
162 unsigned long devsize
= (1<<cfi
->cfiq
->DevSize
) * cfi
->interleave
;
164 mtd
= kmalloc(sizeof(*mtd
), GFP_KERNEL
);
165 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
168 printk(KERN_ERR
"Failed to allocate memory for MTD device\n");
169 kfree(cfi
->cmdset_priv
);
173 memset(mtd
, 0, sizeof(*mtd
));
175 mtd
->type
= MTD_NORFLASH
;
176 mtd
->size
= devsize
* cfi
->numchips
;
178 mtd
->numeraseregions
= cfi
->cfiq
->NumEraseRegions
* cfi
->numchips
;
179 mtd
->eraseregions
= kmalloc(sizeof(struct mtd_erase_region_info
)
180 * mtd
->numeraseregions
, GFP_KERNEL
);
181 if (!mtd
->eraseregions
) {
182 printk(KERN_ERR
"Failed to allocate memory for MTD erase region info\n");
183 kfree(cfi
->cmdset_priv
);
188 for (i
=0; i
<cfi
->cfiq
->NumEraseRegions
; i
++) {
189 unsigned long ernum
, ersize
;
190 ersize
= ((cfi
->cfiq
->EraseRegionInfo
[i
] >> 8) & ~0xff) * cfi
->interleave
;
191 ernum
= (cfi
->cfiq
->EraseRegionInfo
[i
] & 0xffff) + 1;
193 if (mtd
->erasesize
< ersize
) {
194 mtd
->erasesize
= ersize
;
196 for (j
=0; j
<cfi
->numchips
; j
++) {
197 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].offset
= (j
*devsize
)+offset
;
198 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].erasesize
= ersize
;
199 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].numblocks
= ernum
;
201 offset
+= (ersize
* ernum
);
204 if (offset
!= devsize
) {
206 printk(KERN_WARNING
"Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset
, devsize
);
207 kfree(mtd
->eraseregions
);
208 kfree(cfi
->cmdset_priv
);
213 for (i
=0; i
<mtd
->numeraseregions
;i
++){
214 printk(KERN_DEBUG
"%d: offset=0x%x,size=0x%x,blocks=%d\n",
215 i
,mtd
->eraseregions
[i
].offset
,
216 mtd
->eraseregions
[i
].erasesize
,
217 mtd
->eraseregions
[i
].numblocks
);
220 /* Also select the correct geometry setup too */
221 mtd
->erase
= cfi_staa_erase_varsize
;
222 mtd
->read
= cfi_staa_read
;
223 mtd
->write
= cfi_staa_write_buffers
;
224 mtd
->writev
= cfi_staa_writev
;
225 mtd
->sync
= cfi_staa_sync
;
226 mtd
->lock
= cfi_staa_lock
;
227 mtd
->unlock
= cfi_staa_unlock
;
228 mtd
->suspend
= cfi_staa_suspend
;
229 mtd
->resume
= cfi_staa_resume
;
230 mtd
->flags
= MTD_CAP_NORFLASH
;
231 mtd
->flags
|= MTD_ECC
; /* FIXME: Not all STMicro flashes have this */
232 mtd
->eccsize
= 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
233 map
->fldrv
= &cfi_staa_chipdrv
;
234 __module_get(THIS_MODULE
);
235 mtd
->name
= map
->name
;
240 static inline int do_read_onechip(struct map_info
*map
, struct flchip
*chip
, loff_t adr
, size_t len
, u_char
*buf
)
242 map_word status
, status_OK
;
244 DECLARE_WAITQUEUE(wait
, current
);
246 unsigned long cmd_addr
;
247 struct cfi_private
*cfi
= map
->fldrv_priv
;
251 /* Ensure cmd read/writes are aligned. */
252 cmd_addr
= adr
& ~(map_bankwidth(map
)-1);
254 /* Let's determine this according to the interleave only once */
255 status_OK
= CMD(0x80);
257 timeo
= jiffies
+ HZ
;
259 spin_lock_bh(chip
->mutex
);
261 /* Check that the chip's ready to talk to us.
262 * If it's in FL_ERASING state, suspend it and make it talk now.
264 switch (chip
->state
) {
266 if (!(((struct cfi_pri_intelext
*)cfi
->cmdset_priv
)->FeatureSupport
& 2))
267 goto sleep
; /* We don't support erase suspend */
269 map_write (map
, CMD(0xb0), cmd_addr
);
270 /* If the flash has finished erasing, then 'erase suspend'
271 * appears to make some (28F320) flash devices switch to
272 * 'read' mode. Make sure that we switch to 'read status'
273 * mode so we get the right data. --rmk
275 map_write(map
, CMD(0x70), cmd_addr
);
276 chip
->oldstate
= FL_ERASING
;
277 chip
->state
= FL_ERASE_SUSPENDING
;
278 // printk("Erase suspending at 0x%lx\n", cmd_addr);
280 status
= map_read(map
, cmd_addr
);
281 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
284 if (time_after(jiffies
, timeo
)) {
286 map_write(map
, CMD(0xd0), cmd_addr
);
287 /* make sure we're in 'read status' mode */
288 map_write(map
, CMD(0x70), cmd_addr
);
289 chip
->state
= FL_ERASING
;
290 spin_unlock_bh(chip
->mutex
);
291 printk(KERN_ERR
"Chip not ready after erase "
292 "suspended: status = 0x%lx\n", status
.x
[0]);
296 spin_unlock_bh(chip
->mutex
);
298 spin_lock_bh(chip
->mutex
);
302 map_write(map
, CMD(0xff), cmd_addr
);
303 chip
->state
= FL_READY
;
316 map_write(map
, CMD(0x70), cmd_addr
);
317 chip
->state
= FL_STATUS
;
320 status
= map_read(map
, cmd_addr
);
321 if (map_word_andequal(map
, status
, status_OK
, status_OK
)) {
322 map_write(map
, CMD(0xff), cmd_addr
);
323 chip
->state
= FL_READY
;
327 /* Urgh. Chip not yet ready to talk to us. */
328 if (time_after(jiffies
, timeo
)) {
329 spin_unlock_bh(chip
->mutex
);
330 printk(KERN_ERR
"waiting for chip to be ready timed out in read. WSM status = %lx\n", status
.x
[0]);
334 /* Latency issues. Drop the lock, wait a while and retry */
335 spin_unlock_bh(chip
->mutex
);
341 /* Stick ourselves on a wait queue to be woken when
342 someone changes the status */
343 set_current_state(TASK_UNINTERRUPTIBLE
);
344 add_wait_queue(&chip
->wq
, &wait
);
345 spin_unlock_bh(chip
->mutex
);
347 remove_wait_queue(&chip
->wq
, &wait
);
348 timeo
= jiffies
+ HZ
;
352 map_copy_from(map
, buf
, adr
, len
);
355 chip
->state
= chip
->oldstate
;
356 /* What if one interleaved chip has finished and the
357 other hasn't? The old code would leave the finished
358 one in READY mode. That's bad, and caused -EROFS
359 errors to be returned from do_erase_oneblock because
360 that's the only bit it checked for at the time.
361 As the state machine appears to explicitly allow
362 sending the 0x70 (Read Status) command to an erasing
363 chip and expecting it to be ignored, that's what we
365 map_write(map
, CMD(0xd0), cmd_addr
);
366 map_write(map
, CMD(0x70), cmd_addr
);
370 spin_unlock_bh(chip
->mutex
);
374 static int cfi_staa_read (struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
376 struct map_info
*map
= mtd
->priv
;
377 struct cfi_private
*cfi
= map
->fldrv_priv
;
382 /* ofs: offset within the first chip that the first read should start */
383 chipnum
= (from
>> cfi
->chipshift
);
384 ofs
= from
- (chipnum
<< cfi
->chipshift
);
389 unsigned long thislen
;
391 if (chipnum
>= cfi
->numchips
)
394 if ((len
+ ofs
-1) >> cfi
->chipshift
)
395 thislen
= (1<<cfi
->chipshift
) - ofs
;
399 ret
= do_read_onechip(map
, &cfi
->chips
[chipnum
], ofs
, thislen
, buf
);
413 static inline int do_write_buffer(struct map_info
*map
, struct flchip
*chip
,
414 unsigned long adr
, const u_char
*buf
, int len
)
416 struct cfi_private
*cfi
= map
->fldrv_priv
;
417 map_word status
, status_OK
;
418 unsigned long cmd_adr
, timeo
;
419 DECLARE_WAITQUEUE(wait
, current
);
422 /* M58LW064A requires bus alignment for buffer wriets -- saw */
423 if (adr
& (map_bankwidth(map
)-1))
426 wbufsize
= cfi_interleave(cfi
) << cfi
->cfiq
->MaxBufWriteSize
;
428 cmd_adr
= adr
& ~(wbufsize
-1);
430 /* Let's determine this according to the interleave only once */
431 status_OK
= CMD(0x80);
433 timeo
= jiffies
+ HZ
;
436 #ifdef DEBUG_CFI_FEATURES
437 printk("%s: chip->state[%d]\n", __FUNCTION__
, chip
->state
);
439 spin_lock_bh(chip
->mutex
);
441 /* Check that the chip's ready to talk to us.
442 * Later, we can actually think about interrupting it
443 * if it's in FL_ERASING state.
444 * Not just yet, though.
446 switch (chip
->state
) {
452 map_write(map
, CMD(0x70), cmd_adr
);
453 chip
->state
= FL_STATUS
;
454 #ifdef DEBUG_CFI_FEATURES
455 printk("%s: 1 status[%x]\n", __FUNCTION__
, map_read(map
, cmd_adr
));
459 status
= map_read(map
, cmd_adr
);
460 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
462 /* Urgh. Chip not yet ready to talk to us. */
463 if (time_after(jiffies
, timeo
)) {
464 spin_unlock_bh(chip
->mutex
);
465 printk(KERN_ERR
"waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
466 status
.x
[0], map_read(map
, cmd_adr
).x
[0]);
470 /* Latency issues. Drop the lock, wait a while and retry */
471 spin_unlock_bh(chip
->mutex
);
476 /* Stick ourselves on a wait queue to be woken when
477 someone changes the status */
478 set_current_state(TASK_UNINTERRUPTIBLE
);
479 add_wait_queue(&chip
->wq
, &wait
);
480 spin_unlock_bh(chip
->mutex
);
482 remove_wait_queue(&chip
->wq
, &wait
);
483 timeo
= jiffies
+ HZ
;
488 map_write(map
, CMD(0xe8), cmd_adr
);
489 chip
->state
= FL_WRITING_TO_BUFFER
;
493 status
= map_read(map
, cmd_adr
);
494 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
497 spin_unlock_bh(chip
->mutex
);
499 spin_lock_bh(chip
->mutex
);
502 /* Argh. Not ready for write to buffer */
504 map_write(map
, CMD(0x70), cmd_adr
);
505 chip
->state
= FL_STATUS
;
506 spin_unlock_bh(chip
->mutex
);
507 printk(KERN_ERR
"Chip not ready for buffer write. Xstatus = %lx\n", status
.x
[0]);
512 /* Write length of data to come */
513 map_write(map
, CMD(len
/map_bankwidth(map
)-1), cmd_adr
);
517 z
+= map_bankwidth(map
), buf
+= map_bankwidth(map
)) {
519 d
= map_word_load(map
, buf
);
520 map_write(map
, d
, adr
+z
);
523 map_write(map
, CMD(0xd0), cmd_adr
);
524 chip
->state
= FL_WRITING
;
526 spin_unlock_bh(chip
->mutex
);
527 cfi_udelay(chip
->buffer_write_time
);
528 spin_lock_bh(chip
->mutex
);
530 timeo
= jiffies
+ (HZ
/2);
533 if (chip
->state
!= FL_WRITING
) {
534 /* Someone's suspended the write. Sleep */
535 set_current_state(TASK_UNINTERRUPTIBLE
);
536 add_wait_queue(&chip
->wq
, &wait
);
537 spin_unlock_bh(chip
->mutex
);
539 remove_wait_queue(&chip
->wq
, &wait
);
540 timeo
= jiffies
+ (HZ
/ 2); /* FIXME */
541 spin_lock_bh(chip
->mutex
);
545 status
= map_read(map
, cmd_adr
);
546 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
549 /* OK Still waiting */
550 if (time_after(jiffies
, timeo
)) {
552 map_write(map
, CMD(0x50), cmd_adr
);
553 /* put back into read status register mode */
554 map_write(map
, CMD(0x70), adr
);
555 chip
->state
= FL_STATUS
;
557 spin_unlock_bh(chip
->mutex
);
558 printk(KERN_ERR
"waiting for chip to be ready timed out in bufwrite\n");
562 /* Latency issues. Drop the lock, wait a while and retry */
563 spin_unlock_bh(chip
->mutex
);
566 spin_lock_bh(chip
->mutex
);
569 chip
->buffer_write_time
--;
570 if (!chip
->buffer_write_time
)
571 chip
->buffer_write_time
++;
574 chip
->buffer_write_time
++;
576 /* Done and happy. */
578 chip
->state
= FL_STATUS
;
580 /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
581 if (map_word_bitsset(map
, status
, CMD(0x3a))) {
582 #ifdef DEBUG_CFI_FEATURES
583 printk("%s: 2 status[%lx]\n", __FUNCTION__
, status
.x
[0]);
586 map_write(map
, CMD(0x50), cmd_adr
);
587 /* put back into read status register mode */
588 map_write(map
, CMD(0x70), adr
);
590 spin_unlock_bh(chip
->mutex
);
591 return map_word_bitsset(map
, status
, CMD(0x02)) ? -EROFS
: -EIO
;
594 spin_unlock_bh(chip
->mutex
);
599 static int cfi_staa_write_buffers (struct mtd_info
*mtd
, loff_t to
,
600 size_t len
, size_t *retlen
, const u_char
*buf
)
602 struct map_info
*map
= mtd
->priv
;
603 struct cfi_private
*cfi
= map
->fldrv_priv
;
604 int wbufsize
= cfi_interleave(cfi
) << cfi
->cfiq
->MaxBufWriteSize
;
613 chipnum
= to
>> cfi
->chipshift
;
614 ofs
= to
- (chipnum
<< cfi
->chipshift
);
616 #ifdef DEBUG_CFI_FEATURES
617 printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__
, map_bankwidth(map
));
618 printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__
, chipnum
, wbufsize
);
619 printk("%s: ofs[%x] len[%x]\n", __FUNCTION__
, ofs
, len
);
622 /* Write buffer is worth it only if more than one word to write... */
624 /* We must not cross write block boundaries */
625 int size
= wbufsize
- (ofs
& (wbufsize
-1));
630 ret
= do_write_buffer(map
, &cfi
->chips
[chipnum
],
640 if (ofs
>> cfi
->chipshift
) {
643 if (chipnum
== cfi
->numchips
)
652 * Writev for ECC-Flashes is a little more complicated. We need to maintain
653 * a small buffer for this.
654 * XXX: If the buffer size is not a multiple of 2, this will break
656 #define ECCBUF_SIZE (mtd->eccsize)
657 #define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
658 #define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
660 cfi_staa_writev(struct mtd_info
*mtd
, const struct kvec
*vecs
,
661 unsigned long count
, loff_t to
, size_t *retlen
)
664 size_t totlen
= 0, thislen
;
670 /* We should fall back to a general writev implementation.
671 * Until that is written, just break.
675 buffer
= kmalloc(ECCBUF_SIZE
, GFP_KERNEL
);
679 for (i
=0; i
<count
; i
++) {
680 size_t elem_len
= vecs
[i
].iov_len
;
681 void *elem_base
= vecs
[i
].iov_base
;
682 if (!elem_len
) /* FIXME: Might be unnecessary. Check that */
684 if (buflen
) { /* cut off head */
685 if (buflen
+ elem_len
< ECCBUF_SIZE
) { /* just accumulate */
686 memcpy(buffer
+buflen
, elem_base
, elem_len
);
690 memcpy(buffer
+buflen
, elem_base
, ECCBUF_SIZE
-buflen
);
691 ret
= mtd
->write(mtd
, to
, ECCBUF_SIZE
, &thislen
, buffer
);
693 if (ret
|| thislen
!= ECCBUF_SIZE
)
695 elem_len
-= thislen
-buflen
;
696 elem_base
+= thislen
-buflen
;
699 if (ECCBUF_DIV(elem_len
)) { /* write clean aligned data */
700 ret
= mtd
->write(mtd
, to
, ECCBUF_DIV(elem_len
), &thislen
, elem_base
);
702 if (ret
|| thislen
!= ECCBUF_DIV(elem_len
))
706 buflen
= ECCBUF_MOD(elem_len
); /* cut off tail */
708 memset(buffer
, 0xff, ECCBUF_SIZE
);
709 memcpy(buffer
, elem_base
+ thislen
, buflen
);
712 if (buflen
) { /* flush last page, even if not full */
713 /* This is sometimes intended behaviour, really */
714 ret
= mtd
->write(mtd
, to
, buflen
, &thislen
, buffer
);
716 if (ret
|| thislen
!= ECCBUF_SIZE
)
726 static inline int do_erase_oneblock(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
)
728 struct cfi_private
*cfi
= map
->fldrv_priv
;
729 map_word status
, status_OK
;
732 DECLARE_WAITQUEUE(wait
, current
);
737 /* Let's determine this according to the interleave only once */
738 status_OK
= CMD(0x80);
740 timeo
= jiffies
+ HZ
;
742 spin_lock_bh(chip
->mutex
);
744 /* Check that the chip's ready to talk to us. */
745 switch (chip
->state
) {
749 map_write(map
, CMD(0x70), adr
);
750 chip
->state
= FL_STATUS
;
753 status
= map_read(map
, adr
);
754 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
757 /* Urgh. Chip not yet ready to talk to us. */
758 if (time_after(jiffies
, timeo
)) {
759 spin_unlock_bh(chip
->mutex
);
760 printk(KERN_ERR
"waiting for chip to be ready timed out in erase\n");
764 /* Latency issues. Drop the lock, wait a while and retry */
765 spin_unlock_bh(chip
->mutex
);
770 /* Stick ourselves on a wait queue to be woken when
771 someone changes the status */
772 set_current_state(TASK_UNINTERRUPTIBLE
);
773 add_wait_queue(&chip
->wq
, &wait
);
774 spin_unlock_bh(chip
->mutex
);
776 remove_wait_queue(&chip
->wq
, &wait
);
777 timeo
= jiffies
+ HZ
;
782 /* Clear the status register first */
783 map_write(map
, CMD(0x50), adr
);
786 map_write(map
, CMD(0x20), adr
);
787 map_write(map
, CMD(0xD0), adr
);
788 chip
->state
= FL_ERASING
;
790 spin_unlock_bh(chip
->mutex
);
792 spin_lock_bh(chip
->mutex
);
794 /* FIXME. Use a timer to check this, and return immediately. */
795 /* Once the state machine's known to be working I'll do that */
797 timeo
= jiffies
+ (HZ
*20);
799 if (chip
->state
!= FL_ERASING
) {
800 /* Someone's suspended the erase. Sleep */
801 set_current_state(TASK_UNINTERRUPTIBLE
);
802 add_wait_queue(&chip
->wq
, &wait
);
803 spin_unlock_bh(chip
->mutex
);
805 remove_wait_queue(&chip
->wq
, &wait
);
806 timeo
= jiffies
+ (HZ
*20); /* FIXME */
807 spin_lock_bh(chip
->mutex
);
811 status
= map_read(map
, adr
);
812 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
815 /* OK Still waiting */
816 if (time_after(jiffies
, timeo
)) {
817 map_write(map
, CMD(0x70), adr
);
818 chip
->state
= FL_STATUS
;
819 printk(KERN_ERR
"waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status
.x
[0], map_read(map
, adr
).x
[0]);
821 spin_unlock_bh(chip
->mutex
);
825 /* Latency issues. Drop the lock, wait a while and retry */
826 spin_unlock_bh(chip
->mutex
);
828 spin_lock_bh(chip
->mutex
);
834 /* We've broken this before. It doesn't hurt to be safe */
835 map_write(map
, CMD(0x70), adr
);
836 chip
->state
= FL_STATUS
;
837 status
= map_read(map
, adr
);
839 /* check for lock bit */
840 if (map_word_bitsset(map
, status
, CMD(0x3a))) {
841 unsigned char chipstatus
= status
.x
[0];
842 if (!map_word_equal(map
, status
, CMD(chipstatus
))) {
844 for (w
=0; w
<map_words(map
); w
++) {
845 for (i
= 0; i
<cfi_interleave(cfi
); i
++) {
846 chipstatus
|= status
.x
[w
] >> (cfi
->device_type
* 8);
849 printk(KERN_WARNING
"Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
850 status
.x
[0], chipstatus
);
852 /* Reset the error bits */
853 map_write(map
, CMD(0x50), adr
);
854 map_write(map
, CMD(0x70), adr
);
856 if ((chipstatus
& 0x30) == 0x30) {
857 printk(KERN_NOTICE
"Chip reports improper command sequence: status 0x%x\n", chipstatus
);
859 } else if (chipstatus
& 0x02) {
860 /* Protection bit set */
862 } else if (chipstatus
& 0x8) {
864 printk(KERN_WARNING
"Chip reports voltage low on erase: status 0x%x\n", chipstatus
);
866 } else if (chipstatus
& 0x20) {
868 printk(KERN_DEBUG
"Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr
, chipstatus
);
869 timeo
= jiffies
+ HZ
;
870 chip
->state
= FL_STATUS
;
871 spin_unlock_bh(chip
->mutex
);
874 printk(KERN_DEBUG
"Chip erase failed at 0x%08lx: status 0x%x\n", adr
, chipstatus
);
880 spin_unlock_bh(chip
->mutex
);
884 int cfi_staa_erase_varsize(struct mtd_info
*mtd
, struct erase_info
*instr
)
885 { struct map_info
*map
= mtd
->priv
;
886 struct cfi_private
*cfi
= map
->fldrv_priv
;
887 unsigned long adr
, len
;
888 int chipnum
, ret
= 0;
890 struct mtd_erase_region_info
*regions
= mtd
->eraseregions
;
892 if (instr
->addr
> mtd
->size
)
895 if ((instr
->len
+ instr
->addr
) > mtd
->size
)
898 /* Check that both start and end of the requested erase are
899 * aligned with the erasesize at the appropriate addresses.
904 /* Skip all erase regions which are ended before the start of
905 the requested erase. Actually, to save on the calculations,
906 we skip to the first erase region which starts after the
907 start of the requested erase, and then go back one.
910 while (i
< mtd
->numeraseregions
&& instr
->addr
>= regions
[i
].offset
)
914 /* OK, now i is pointing at the erase region in which this
915 erase request starts. Check the start of the requested
916 erase range is aligned with the erase size which is in
920 if (instr
->addr
& (regions
[i
].erasesize
-1))
923 /* Remember the erase region we start on */
926 /* Next, check that the end of the requested erase is aligned
927 * with the erase region at that address.
930 while (i
<mtd
->numeraseregions
&& (instr
->addr
+ instr
->len
) >= regions
[i
].offset
)
933 /* As before, drop back one to point at the region in which
934 the address actually falls
938 if ((instr
->addr
+ instr
->len
) & (regions
[i
].erasesize
-1))
941 chipnum
= instr
->addr
>> cfi
->chipshift
;
942 adr
= instr
->addr
- (chipnum
<< cfi
->chipshift
);
948 ret
= do_erase_oneblock(map
, &cfi
->chips
[chipnum
], adr
);
953 adr
+= regions
[i
].erasesize
;
954 len
-= regions
[i
].erasesize
;
956 if (adr
% (1<< cfi
->chipshift
) == ((regions
[i
].offset
+ (regions
[i
].erasesize
* regions
[i
].numblocks
)) %( 1<< cfi
->chipshift
)))
959 if (adr
>> cfi
->chipshift
) {
963 if (chipnum
>= cfi
->numchips
)
968 instr
->state
= MTD_ERASE_DONE
;
969 mtd_erase_callback(instr
);
974 static void cfi_staa_sync (struct mtd_info
*mtd
)
976 struct map_info
*map
= mtd
->priv
;
977 struct cfi_private
*cfi
= map
->fldrv_priv
;
981 DECLARE_WAITQUEUE(wait
, current
);
983 for (i
=0; !ret
&& i
<cfi
->numchips
; i
++) {
984 chip
= &cfi
->chips
[i
];
987 spin_lock_bh(chip
->mutex
);
989 switch(chip
->state
) {
994 chip
->oldstate
= chip
->state
;
995 chip
->state
= FL_SYNCING
;
996 /* No need to wake_up() on this state change -
997 * as the whole point is that nobody can do anything
998 * with the chip now anyway.
1001 spin_unlock_bh(chip
->mutex
);
1005 /* Not an idle state */
1006 add_wait_queue(&chip
->wq
, &wait
);
1008 spin_unlock_bh(chip
->mutex
);
1010 remove_wait_queue(&chip
->wq
, &wait
);
1016 /* Unlock the chips again */
1018 for (i
--; i
>=0; i
--) {
1019 chip
= &cfi
->chips
[i
];
1021 spin_lock_bh(chip
->mutex
);
1023 if (chip
->state
== FL_SYNCING
) {
1024 chip
->state
= chip
->oldstate
;
1027 spin_unlock_bh(chip
->mutex
);
1031 static inline int do_lock_oneblock(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
)
1033 struct cfi_private
*cfi
= map
->fldrv_priv
;
1034 map_word status
, status_OK
;
1035 unsigned long timeo
= jiffies
+ HZ
;
1036 DECLARE_WAITQUEUE(wait
, current
);
1040 /* Let's determine this according to the interleave only once */
1041 status_OK
= CMD(0x80);
1043 timeo
= jiffies
+ HZ
;
1045 spin_lock_bh(chip
->mutex
);
1047 /* Check that the chip's ready to talk to us. */
1048 switch (chip
->state
) {
1050 case FL_JEDEC_QUERY
:
1052 map_write(map
, CMD(0x70), adr
);
1053 chip
->state
= FL_STATUS
;
1056 status
= map_read(map
, adr
);
1057 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1060 /* Urgh. Chip not yet ready to talk to us. */
1061 if (time_after(jiffies
, timeo
)) {
1062 spin_unlock_bh(chip
->mutex
);
1063 printk(KERN_ERR
"waiting for chip to be ready timed out in lock\n");
1067 /* Latency issues. Drop the lock, wait a while and retry */
1068 spin_unlock_bh(chip
->mutex
);
1073 /* Stick ourselves on a wait queue to be woken when
1074 someone changes the status */
1075 set_current_state(TASK_UNINTERRUPTIBLE
);
1076 add_wait_queue(&chip
->wq
, &wait
);
1077 spin_unlock_bh(chip
->mutex
);
1079 remove_wait_queue(&chip
->wq
, &wait
);
1080 timeo
= jiffies
+ HZ
;
1085 map_write(map
, CMD(0x60), adr
);
1086 map_write(map
, CMD(0x01), adr
);
1087 chip
->state
= FL_LOCKING
;
1089 spin_unlock_bh(chip
->mutex
);
1091 spin_lock_bh(chip
->mutex
);
1093 /* FIXME. Use a timer to check this, and return immediately. */
1094 /* Once the state machine's known to be working I'll do that */
1096 timeo
= jiffies
+ (HZ
*2);
1099 status
= map_read(map
, adr
);
1100 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1103 /* OK Still waiting */
1104 if (time_after(jiffies
, timeo
)) {
1105 map_write(map
, CMD(0x70), adr
);
1106 chip
->state
= FL_STATUS
;
1107 printk(KERN_ERR
"waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status
.x
[0], map_read(map
, adr
).x
[0]);
1109 spin_unlock_bh(chip
->mutex
);
1113 /* Latency issues. Drop the lock, wait a while and retry */
1114 spin_unlock_bh(chip
->mutex
);
1116 spin_lock_bh(chip
->mutex
);
1119 /* Done and happy. */
1120 chip
->state
= FL_STATUS
;
1123 spin_unlock_bh(chip
->mutex
);
1126 static int cfi_staa_lock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
1128 struct map_info
*map
= mtd
->priv
;
1129 struct cfi_private
*cfi
= map
->fldrv_priv
;
1131 int chipnum
, ret
= 0;
1132 #ifdef DEBUG_LOCK_BITS
1133 int ofs_factor
= cfi
->interleave
* cfi
->device_type
;
1136 if (ofs
& (mtd
->erasesize
- 1))
1139 if (len
& (mtd
->erasesize
-1))
1142 if ((len
+ ofs
) > mtd
->size
)
1145 chipnum
= ofs
>> cfi
->chipshift
;
1146 adr
= ofs
- (chipnum
<< cfi
->chipshift
);
1150 #ifdef DEBUG_LOCK_BITS
1151 cfi_send_gen_cmd(0x90, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1152 printk("before lock: block status register is %x\n",cfi_read_query(map
, adr
+(2*ofs_factor
)));
1153 cfi_send_gen_cmd(0xff, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1156 ret
= do_lock_oneblock(map
, &cfi
->chips
[chipnum
], adr
);
1158 #ifdef DEBUG_LOCK_BITS
1159 cfi_send_gen_cmd(0x90, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1160 printk("after lock: block status register is %x\n",cfi_read_query(map
, adr
+(2*ofs_factor
)));
1161 cfi_send_gen_cmd(0xff, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1167 adr
+= mtd
->erasesize
;
1168 len
-= mtd
->erasesize
;
1170 if (adr
>> cfi
->chipshift
) {
1174 if (chipnum
>= cfi
->numchips
)
1180 static inline int do_unlock_oneblock(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
)
1182 struct cfi_private
*cfi
= map
->fldrv_priv
;
1183 map_word status
, status_OK
;
1184 unsigned long timeo
= jiffies
+ HZ
;
1185 DECLARE_WAITQUEUE(wait
, current
);
1189 /* Let's determine this according to the interleave only once */
1190 status_OK
= CMD(0x80);
1192 timeo
= jiffies
+ HZ
;
1194 spin_lock_bh(chip
->mutex
);
1196 /* Check that the chip's ready to talk to us. */
1197 switch (chip
->state
) {
1199 case FL_JEDEC_QUERY
:
1201 map_write(map
, CMD(0x70), adr
);
1202 chip
->state
= FL_STATUS
;
1205 status
= map_read(map
, adr
);
1206 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1209 /* Urgh. Chip not yet ready to talk to us. */
1210 if (time_after(jiffies
, timeo
)) {
1211 spin_unlock_bh(chip
->mutex
);
1212 printk(KERN_ERR
"waiting for chip to be ready timed out in unlock\n");
1216 /* Latency issues. Drop the lock, wait a while and retry */
1217 spin_unlock_bh(chip
->mutex
);
1222 /* Stick ourselves on a wait queue to be woken when
1223 someone changes the status */
1224 set_current_state(TASK_UNINTERRUPTIBLE
);
1225 add_wait_queue(&chip
->wq
, &wait
);
1226 spin_unlock_bh(chip
->mutex
);
1228 remove_wait_queue(&chip
->wq
, &wait
);
1229 timeo
= jiffies
+ HZ
;
1234 map_write(map
, CMD(0x60), adr
);
1235 map_write(map
, CMD(0xD0), adr
);
1236 chip
->state
= FL_UNLOCKING
;
1238 spin_unlock_bh(chip
->mutex
);
1240 spin_lock_bh(chip
->mutex
);
1242 /* FIXME. Use a timer to check this, and return immediately. */
1243 /* Once the state machine's known to be working I'll do that */
1245 timeo
= jiffies
+ (HZ
*2);
1248 status
= map_read(map
, adr
);
1249 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1252 /* OK Still waiting */
1253 if (time_after(jiffies
, timeo
)) {
1254 map_write(map
, CMD(0x70), adr
);
1255 chip
->state
= FL_STATUS
;
1256 printk(KERN_ERR
"waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status
.x
[0], map_read(map
, adr
).x
[0]);
1258 spin_unlock_bh(chip
->mutex
);
1262 /* Latency issues. Drop the unlock, wait a while and retry */
1263 spin_unlock_bh(chip
->mutex
);
1265 spin_lock_bh(chip
->mutex
);
1268 /* Done and happy. */
1269 chip
->state
= FL_STATUS
;
1272 spin_unlock_bh(chip
->mutex
);
1275 static int cfi_staa_unlock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
1277 struct map_info
*map
= mtd
->priv
;
1278 struct cfi_private
*cfi
= map
->fldrv_priv
;
1280 int chipnum
, ret
= 0;
1281 #ifdef DEBUG_LOCK_BITS
1282 int ofs_factor
= cfi
->interleave
* cfi
->device_type
;
1285 chipnum
= ofs
>> cfi
->chipshift
;
1286 adr
= ofs
- (chipnum
<< cfi
->chipshift
);
1288 #ifdef DEBUG_LOCK_BITS
1290 unsigned long temp_adr
= adr
;
1291 unsigned long temp_len
= len
;
1293 cfi_send_gen_cmd(0x90, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1295 printk("before unlock %x: block status register is %x\n",temp_adr
,cfi_read_query(map
, temp_adr
+(2*ofs_factor
)));
1296 temp_adr
+= mtd
->erasesize
;
1297 temp_len
-= mtd
->erasesize
;
1299 cfi_send_gen_cmd(0xff, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1303 ret
= do_unlock_oneblock(map
, &cfi
->chips
[chipnum
], adr
);
1305 #ifdef DEBUG_LOCK_BITS
1306 cfi_send_gen_cmd(0x90, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1307 printk("after unlock: block status register is %x\n",cfi_read_query(map
, adr
+(2*ofs_factor
)));
1308 cfi_send_gen_cmd(0xff, 0x55, 0, map
, cfi
, cfi
->device_type
, NULL
);
1314 static int cfi_staa_suspend(struct mtd_info
*mtd
)
1316 struct map_info
*map
= mtd
->priv
;
1317 struct cfi_private
*cfi
= map
->fldrv_priv
;
1319 struct flchip
*chip
;
1322 for (i
=0; !ret
&& i
<cfi
->numchips
; i
++) {
1323 chip
= &cfi
->chips
[i
];
1325 spin_lock_bh(chip
->mutex
);
1327 switch(chip
->state
) {
1331 case FL_JEDEC_QUERY
:
1332 chip
->oldstate
= chip
->state
;
1333 chip
->state
= FL_PM_SUSPENDED
;
1334 /* No need to wake_up() on this state change -
1335 * as the whole point is that nobody can do anything
1336 * with the chip now anyway.
1338 case FL_PM_SUSPENDED
:
1345 spin_unlock_bh(chip
->mutex
);
1348 /* Unlock the chips again */
1351 for (i
--; i
>=0; i
--) {
1352 chip
= &cfi
->chips
[i
];
1354 spin_lock_bh(chip
->mutex
);
1356 if (chip
->state
== FL_PM_SUSPENDED
) {
1357 /* No need to force it into a known state here,
1358 because we're returning failure, and it didn't
1360 chip
->state
= chip
->oldstate
;
1363 spin_unlock_bh(chip
->mutex
);
1370 static void cfi_staa_resume(struct mtd_info
*mtd
)
1372 struct map_info
*map
= mtd
->priv
;
1373 struct cfi_private
*cfi
= map
->fldrv_priv
;
1375 struct flchip
*chip
;
1377 for (i
=0; i
<cfi
->numchips
; i
++) {
1379 chip
= &cfi
->chips
[i
];
1381 spin_lock_bh(chip
->mutex
);
1383 /* Go to known state. Chip may have been power cycled */
1384 if (chip
->state
== FL_PM_SUSPENDED
) {
1385 map_write(map
, CMD(0xFF), 0);
1386 chip
->state
= FL_READY
;
1390 spin_unlock_bh(chip
->mutex
);
1394 static void cfi_staa_destroy(struct mtd_info
*mtd
)
1396 struct map_info
*map
= mtd
->priv
;
1397 struct cfi_private
*cfi
= map
->fldrv_priv
;
1398 kfree(cfi
->cmdset_priv
);
/* Name under which cfi_cmdset_0020() is published for inter-module lookup. */
static char im_name[] = "cfi_cmdset_0020";
1404 static int __init
cfi_staa_init(void)
1406 inter_module_register(im_name
, THIS_MODULE
, &cfi_cmdset_0020
);
1410 static void __exit
cfi_staa_exit(void)
1412 inter_module_unregister(im_name
);
1415 module_init(cfi_staa_init
);
1416 module_exit(cfi_staa_exit
);
1418 MODULE_LICENSE("GPL");