drivers/mtd/chips/cfi_cmdset_0020.c

/*
 * Common Flash Interface support:
 * ST Advanced Architecture Command Set (ID 0x0020)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0020.c,v 1.17 2004/11/20 12:49:04 dwmw2 Exp $
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 06/21/2002   Joern Engel <joern@wh.fh-wedel.de> and others
 *      - modified Intel Command Set 0x0001 to support ST Advanced Architecture
 *        (command set 0x0020)
 *      - added a writev function
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
                unsigned long count, loff_t to, size_t *retlen);
static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_staa_sync (struct mtd_info *);
static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_staa_suspend (struct mtd_info *);
static void cfi_staa_resume (struct mtd_info *);

static void cfi_staa_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0020(struct map_info *, int);

static struct mtd_info *cfi_staa_setup (struct map_info *);

static struct mtd_chip_driver cfi_staa_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_staa_destroy,
        .name           = "cfi_cmdset_0020",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
//#define DEBUG_CFI_FEATURES

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        for (i=9; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X: supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X: supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:  %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        /* VccOptimal/VppOptimal are single BCD bytes: volts in the high
           nibble, tenths of a volt in the low nibble */
        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
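
/*
 * Illustrative example of the decoding above (the value is invented, not
 * taken from any datasheet): FeatureSupport == 0x0026 sets bits 1, 2 and 5,
 * so cfi_tell_features() would report Suspend Erase, Suspend Program and
 * Instant block lock as supported, and everything else (Chip Erase,
 * Queued Erase, ...) as unsupported.
 */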

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */

struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int i;

        if (cfi->cfi_mode) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
                if (!extp)
                        return NULL;

                /* Do some byteswapping if necessary */
                extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
                extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;
        }

        for (i=0; i< cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 128;
                cfi->chips[i].buffer_write_time = 128;
                cfi->chips[i].erase_time = 1024;
        }

        return cfi_staa_setup(map);
}
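
/*
 * Rough usage sketch, assuming the inter_module conventions described
 * above.  This is illustrative pseudocode, not taken from any in-tree map
 * driver; inter_module_get() actually returns const void *, so a cast is
 * needed, and the lookup name must match the one registered by
 * cfi_staa_init() below:
 *
 *      struct mtd_info *(*cmdset)(struct map_info *, int);
 *
 *      cmdset = inter_module_get("cfi_cmdset_0020");
 *      if (cmdset) {
 *              mtd = cmdset(map, 1);   // 1: use the primary extended table
 *              ...
 *              inter_module_put("cfi_cmdset_0020"); // drop the use count
 *      }
 */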

static struct mtd_info *cfi_staa_setup(struct map_info *map)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                kfree(cfi->cmdset_priv);
                return NULL;
        }

        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;
        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                kfree(cfi->cmdset_priv);
                kfree(mtd);
                return NULL;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                kfree(mtd->eraseregions);
                kfree(cfi->cmdset_priv);
                kfree(mtd);
                return NULL;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

        /* Also select the correct geometry setup too */
        mtd->erase = cfi_staa_erase_varsize;
        mtd->read = cfi_staa_read;
        mtd->write = cfi_staa_write_buffers;
        mtd->writev = cfi_staa_writev;
        mtd->sync = cfi_staa_sync;
        mtd->lock = cfi_staa_lock;
        mtd->unlock = cfi_staa_unlock;
        mtd->suspend = cfi_staa_suspend;
        mtd->resume = cfi_staa_resume;
        mtd->flags = MTD_CAP_NORFLASH;
        mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
        mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
        map->fldrv = &cfi_staa_chipdrv;
        __module_get(THIS_MODULE);
        mtd->name = map->name;
        return mtd;
}
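
/*
 * A worked example of the EraseRegionInfo decoding in cfi_staa_setup()
 * (numbers invented for illustration): each 32-bit entry packs
 * (block size / 256) in its upper 16 bits and (number of blocks - 1) in
 * its lower 16 bits.  For a region of 64 blocks of 64KiB on a
 * non-interleaved map:
 *
 *      EraseRegionInfo = 0x0100003f
 *      ersize = ((0x0100003f >> 8) & ~0xff) * 1 = 0x10000  (64KiB)
 *      ernum  = (0x0100003f & 0xffff) + 1       = 64 blocks
 *
 * which contributes 64 * 64KiB = 4MiB to the running 'offset', matching
 * devsize for a 4MiB chip.
 */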

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        map_word status, status_OK;
        unsigned long timeo;
        DECLARE_WAITQUEUE(wait, current);
        int suspended = 0;
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
retry:
        spin_lock_bh(chip->mutex);

        /* Check that the chip's ready to talk to us.
         * If it's in FL_ERASING state, suspend it and make it talk now.
         */
        switch (chip->state) {
        case FL_ERASING:
                if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
                        goto sleep; /* We don't support erase suspend */

                map_write (map, CMD(0xb0), cmd_addr);
                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), cmd_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                //      printk("Erase suspending at 0x%lx\n", cmd_addr);
                for (;;) {
                        status = map_read(map, cmd_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh */
                                map_write(map, CMD(0xd0), cmd_addr);
                                /* make sure we're in 'read status' mode */
                                map_write(map, CMD(0x70), cmd_addr);
                                chip->state = FL_ERASING;
                                spin_unlock_bh(chip->mutex);
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", status.x[0]);
                                return -EIO;
                        }

                        spin_unlock_bh(chip->mutex);
                        cfi_udelay(1);
                        spin_lock_bh(chip->mutex);
                }

                suspended = 1;
                map_write(map, CMD(0xff), cmd_addr);
                chip->state = FL_READY;
                break;

#if 0
        case FL_WRITING:
                /* Not quite yet */
#endif

        case FL_READY:
                break;

        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                map_write(map, CMD(0x70), cmd_addr);
                chip->state = FL_STATUS;
                /* Fall through: the status just requested is read below */

        case FL_STATUS:
                status = map_read(map, cmd_addr);
                if (map_word_andequal(map, status, status_OK, status_OK)) {
                        map_write(map, CMD(0xff), cmd_addr);
                        chip->state = FL_READY;
                        break;
                }

                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                goto retry;

        default:
        sleep:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock_bh(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        map_copy_from(map, buf, adr, len);

        if (suspended) {
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), cmd_addr);
                map_write(map, CMD(0x70), cmd_addr);
        }

        wake_up(&chip->wq);
        spin_unlock_bh(chip->mutex);
        return 0;
}

static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}
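
/*
 * Illustration of the chip-boundary split above (values invented): with
 * cfi->chipshift == 22 (4MiB per interleaved set), a read of len 0x2000
 * at from == 0x3ff000 starts at ofs 0x3ff000 in chip 0.  Since
 * (len + ofs - 1) >> 22 is non-zero, the first pass reads only
 * thislen = 0x400000 - 0x3ff000 = 0x1000 bytes; the loop then resets
 * ofs to 0 and reads the remaining 0x1000 bytes from chip 1.
 */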

static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
                                  unsigned long adr, const u_char *buf, int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long cmd_adr, timeo;
        DECLARE_WAITQUEUE(wait, current);
        int wbufsize, z;

        /* M58LW064A requires bus alignment for buffer writes -- saw */
        if (adr & (map_bankwidth(map)-1))
                return -EINVAL;

        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        adr += chip->start;
        cmd_adr = adr & ~(wbufsize-1);

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
retry:

#ifdef DEBUG_CFI_FEATURES
        printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
#endif
        spin_lock_bh(chip->mutex);

        /* Check that the chip's ready to talk to us.
         * Later, we can actually think about interrupting it
         * if it's in FL_ERASING state.
         * Not just yet, though.
         */
        switch (chip->state) {
        case FL_READY:
                break;

        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                map_write(map, CMD(0x70), cmd_adr);
                chip->state = FL_STATUS;
#ifdef DEBUG_CFI_FEATURES
                printk("%s: 1 status[%lx]\n", __FUNCTION__, map_read(map, cmd_adr).x[0]);
#endif
                /* Fall through: the status just requested is read below */

        case FL_STATUS:
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;
                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
                               status.x[0], map_read(map, cmd_adr).x[0]);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                goto retry;

        default:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock_bh(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        ENABLE_VPP(map);
        map_write(map, CMD(0xe8), cmd_adr);
        chip->state = FL_WRITING_TO_BUFFER;

        z = 0;
        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                spin_lock_bh(chip->mutex);

                if (++z > 100) {
                        /* Argh. Not ready for write to buffer */
                        DISABLE_VPP(map);
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
                        return -EIO;
                }
        }

        /* Write length of data to come */
        map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );

        /* Write data */
        for (z = 0; z < len;
             z += map_bankwidth(map), buf += map_bankwidth(map)) {
                map_word d;
                d = map_word_load(map, buf);
                map_write(map, d, adr+z);
        }
        /* GO GO GO */
        map_write(map, CMD(0xd0), cmd_adr);
        chip->state = FL_WRITING;

        spin_unlock_bh(chip->mutex);
        cfi_udelay(chip->buffer_write_time);
        spin_lock_bh(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock_bh(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock_bh(chip->mutex);
                        continue;
                }

                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        /* clear status */
                        map_write(map, CMD(0x50), cmd_adr);
                        /* put back into read status register mode */
                        map_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        DISABLE_VPP(map);
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                z++;
                spin_lock_bh(chip->mutex);
        }
        if (!z) {
                chip->buffer_write_time--;
                if (!chip->buffer_write_time)
                        chip->buffer_write_time++;
        }
        if (z > 1)
                chip->buffer_write_time++;

        /* Done and happy. */
        DISABLE_VPP(map);
        chip->state = FL_STATUS;

        /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
        if (map_word_bitsset(map, status, CMD(0x3a))) {
#ifdef DEBUG_CFI_FEATURES
                printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
#endif
                /* clear status */
                map_write(map, CMD(0x50), cmd_adr);
                /* put back into read status register mode */
                map_write(map, CMD(0x70), adr);
                wake_up(&chip->wq);
                spin_unlock_bh(chip->mutex);
                return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
        }
        wake_up(&chip->wq);
        spin_unlock_bh(chip->mutex);

        return 0;
}
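
/*
 * Example of the 0xe8 buffer-program bookkeeping above (sizes invented):
 * on a map with map_bankwidth(map) == 4, writing len == 32 bytes sends
 * CMD(32/4 - 1) == CMD(7), i.e. a count of 8 bus words minus one, and the
 * data loop then issues exactly 8 map_write()s of one 32-bit word each
 * before the confirm command CMD(0xd0) starts the actual program cycle.
 */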

static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
                                   size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

#ifdef DEBUG_CFI_FEATURES
        printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
        printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
        printk("%s: ofs[%lx] len[%zx]\n", __FUNCTION__, ofs, len);
#endif

        /* Write buffer is worth it only if more than one word to write... */
        while (len > 0) {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len;

                ret = do_write_buffer(map, &cfi->chips[chipnum],
                                      ofs, buf, size);
                if (ret)
                        return ret;

                ofs += size;
                buf += size;
                (*retlen) += size;
                len -= size;

                if (ofs >> cfi->chipshift) {
                        chipnum ++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        return 0;
}
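
/*
 * How the wbufsize chunking above splits a request (numbers invented):
 * with wbufsize == 32, a write of len == 100 at ofs == 0x1c becomes
 * chunks of 4 (up to the next 32-byte boundary), then 32, 32 and 32
 * bytes, each passed separately to do_write_buffer() so that no chunk
 * ever crosses a write-buffer boundary.
 */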

/*
 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 * a small buffer for this.
 * XXX: If the buffer size is not a power of 2, this will break
 */
#define ECCBUF_SIZE (mtd->eccsize)
#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
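
/*
 * What the macros above do, assuming ECCBUF_SIZE is a power of two
 * (say 8, as set in cfi_staa_setup()): for a 21-byte element,
 * ECCBUF_DIV(21) == 16 is the ECC-aligned part that is written directly,
 * and ECCBUF_MOD(21) == 5 is the tail carried over in 'buffer' until the
 * next element (or the final flush) completes the page.
 */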

static int
cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
                unsigned long count, loff_t to, size_t *retlen)
{
        unsigned long i;
        size_t totlen = 0, thislen;
        int ret = 0;
        size_t buflen = 0;
        char *buffer;   /* bounce buffer for unaligned heads and tails */

        if (!ECCBUF_SIZE) {
                /* We should fall back to a general writev implementation.
                 * Until that is written, just break.
                 */
                return -EIO;
        }
        buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        for (i=0; i<count; i++) {
                size_t elem_len = vecs[i].iov_len;
                void *elem_base = vecs[i].iov_base;
                if (!elem_len) /* FIXME: Might be unnecessary. Check that */
                        continue;
                if (buflen) { /* cut off head */
                        if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
                                memcpy(buffer+buflen, elem_base, elem_len);
                                buflen += elem_len;
                                continue;
                        }
                        memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
                        ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
                        totlen += thislen;
                        if (ret || thislen != ECCBUF_SIZE)
                                goto write_error;
                        elem_len -= thislen-buflen;
                        elem_base += thislen-buflen;
                        to += ECCBUF_SIZE;
                }
                if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
                        ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
                        totlen += thislen;
                        if (ret || thislen != ECCBUF_DIV(elem_len))
                                goto write_error;
                        to += thislen;
                }
                buflen = ECCBUF_MOD(elem_len); /* cut off tail */
                if (buflen) {
                        memset(buffer, 0xff, ECCBUF_SIZE);
                        memcpy(buffer, elem_base + thislen, buflen);
                }
        }
        if (buflen) { /* flush last page, even if not full */
                /* This is sometimes intended behaviour, really */
                ret = mtd->write(mtd, to, buflen, &thislen, buffer);
                totlen += thislen;
                /* a short tail is expected here, so compare against buflen,
                   not ECCBUF_SIZE */
                if (ret || thislen != buflen)
                        goto write_error;
        }
write_error:
        if (retlen)
                *retlen = totlen;
        kfree(buffer);  /* the bounce buffer is freed on all paths */
        return ret;
}

static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo;
        int retries = 3;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
retry:
        spin_lock_bh(chip->mutex);

        /* Check that the chip's ready to talk to us. */
        switch (chip->state) {
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
        case FL_READY:
                map_write(map, CMD(0x70), adr);
                chip->state = FL_STATUS;
                /* Fall through: the status just requested is read below */

        case FL_STATUS:
                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                goto retry;

        default:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock_bh(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        ENABLE_VPP(map);
        /* Clear the status register first */
        map_write(map, CMD(0x50), adr);

        /* Now erase */
        map_write(map, CMD(0x20), adr);
        map_write(map, CMD(0xD0), adr);
        chip->state = FL_ERASING;

        spin_unlock_bh(chip->mutex);
        msleep(1000);
        spin_lock_bh(chip->mutex);

        /* FIXME. Use a timer to check this, and return immediately. */
        /* Once the state machine's known to be working I'll do that */

        timeo = jiffies + (HZ*20);
        for (;;) {
                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock_bh(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ*20); /* FIXME */
                        spin_lock_bh(chip->mutex);
                        continue;
                }

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        map_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
                        DISABLE_VPP(map);
                        spin_unlock_bh(chip->mutex);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                spin_lock_bh(chip->mutex);
        }

        DISABLE_VPP(map);
        ret = 0;

        /* We've broken this before. It doesn't hurt to be safe */
        map_write(map, CMD(0x70), adr);
        chip->state = FL_STATUS;
        status = map_read(map, adr);

        /* check for lock bit */
        if (map_word_bitsset(map, status, CMD(0x3a))) {
                unsigned char chipstatus = status.x[0];
                if (!map_word_equal(map, status, CMD(chipstatus))) {
                        int i, w;
                        for (w=0; w<map_words(map); w++) {
                                for (i = 0; i<cfi_interleave(cfi); i++) {
                                        chipstatus |= status.x[w] >> (cfi->device_type * 8);
                                }
                        }
                        printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
                               status.x[0], chipstatus);
                }
                /* Reset the error bits */
                map_write(map, CMD(0x50), adr);
                map_write(map, CMD(0x70), adr);

                if ((chipstatus & 0x30) == 0x30) {
                        printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
                        ret = -EIO;
                } else if (chipstatus & 0x02) {
                        /* Protection bit set */
                        ret = -EROFS;
                } else if (chipstatus & 0x8) {
                        /* Voltage */
                        printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
                        ret = -EIO;
                } else if (chipstatus & 0x20) {
                        if (retries--) {
                                printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
                                timeo = jiffies + HZ;
                                chip->state = FL_STATUS;
                                spin_unlock_bh(chip->mutex);
                                goto retry;
                        }
                        printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
                        ret = -EIO;
                }
        }

        wake_up(&chip->wq);
        spin_unlock_bh(chip->mutex);
        return ret;
}
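
/*
 * An invented example of the error decoding above: status 0xaa sets
 * SR.1 (lock), SR.3 (VPP) and SR.5 (erase fail).  (0xaa & 0x30) != 0x30,
 * so it is not an improper-command-sequence error; the SR.1 test fires
 * next and the erase returns -EROFS.  Only a status with both SR.4 and
 * SR.5 set (e.g. 0xb0) is reported as a bad command sequence, and a
 * bare SR.5 (0xa0) is retried up to 'retries' times before giving up.
 */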

static int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long adr, len;
        int chipnum, ret = 0;
        int i, first;
        struct mtd_erase_region_info *regions = mtd->eraseregions;

        if (instr->addr > mtd->size)
                return -EINVAL;

        if ((instr->len + instr->addr) > mtd->size)
                return -EINVAL;

        /* Check that both start and end of the requested erase are
         * aligned with the erasesize at the appropriate addresses.
         */

        i = 0;

        /* Skip all erase regions which are ended before the start of
           the requested erase. Actually, to save on the calculations,
           we skip to the first erase region which starts after the
           start of the requested erase, and then go back one.
         */

        while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
                i++;
        i--;

        /* OK, now i is pointing at the erase region in which this
           erase request starts. Check the start of the requested
           erase range is aligned with the erase size which is in
           effect here.
         */

        if (instr->addr & (regions[i].erasesize-1))
                return -EINVAL;

        /* Remember the erase region we start on */
        first = i;

        /* Next, check that the end of the requested erase is aligned
         * with the erase region at that address.
         */

        while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
                i++;

        /* As before, drop back one to point at the region in which
           the address actually falls
         */
        i--;

        if ((instr->addr + instr->len) & (regions[i].erasesize-1))
                return -EINVAL;

        chipnum = instr->addr >> cfi->chipshift;
        adr = instr->addr - (chipnum << cfi->chipshift);
        len = instr->len;

        i = first;

        while (len) {
                ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);

                if (ret)
                        return ret;

                adr += regions[i].erasesize;
                len -= regions[i].erasesize;

                /* Step to the next region when we cross its end */
                if (adr % (1 << cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) % (1 << cfi->chipshift)))
                        i++;

                if (adr >> cfi->chipshift) {
                        adr = 0;
                        chipnum++;

                        if (chipnum >= cfi->numchips)
                                break;
                }
        }

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return 0;
}
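
/*
 * Alignment example for the region walk above (geometry invented): take
 * two regions per chip, 8 x 8KiB blocks at offset 0 followed by 63 x
 * 64KiB blocks from offset 0x10000.  An erase of addr 0x8000, len 0x8000
 * is accepted: the start lies in the 8KiB region and is 8KiB-aligned,
 * and the end (0x10000) is 64KiB-aligned for the region it falls in.
 * An erase of addr 0x8000, len 0x9000 is rejected, because its end
 * (0x11000) is not aligned to that region's 64KiB erasesize.
 */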

static void cfi_staa_sync (struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;
        DECLARE_WAITQUEUE(wait, current);

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

        retry:
                spin_lock_bh(chip->mutex);

                switch(chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_SYNCING;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                case FL_SYNCING:
                        spin_unlock_bh(chip->mutex);
                        break;

                default:
                        /* Not an idle state */
                        add_wait_queue(&chip->wq, &wait);

                        spin_unlock_bh(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);

                        goto retry;
                }
        }

        /* Unlock the chips again */

        for (i--; i >=0; i--) {
                chip = &cfi->chips[i];

                spin_lock_bh(chip->mutex);

                if (chip->state == FL_SYNCING) {
                        chip->state = chip->oldstate;
                        wake_up(&chip->wq);
                }
                spin_unlock_bh(chip->mutex);
        }
}

static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo = jiffies + HZ;
        DECLARE_WAITQUEUE(wait, current);

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
retry:
        spin_lock_bh(chip->mutex);

        /* Check that the chip's ready to talk to us. */
        switch (chip->state) {
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
        case FL_READY:
                map_write(map, CMD(0x70), adr);
                chip->state = FL_STATUS;
                /* Fall through: the status just requested is read below */

        case FL_STATUS:
                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                goto retry;

        default:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock_bh(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        ENABLE_VPP(map);
        map_write(map, CMD(0x60), adr);
        map_write(map, CMD(0x01), adr);
        chip->state = FL_LOCKING;

        spin_unlock_bh(chip->mutex);
        msleep(1000);
        spin_lock_bh(chip->mutex);

        /* FIXME. Use a timer to check this, and return immediately. */
        /* Once the state machine's known to be working I'll do that */

        timeo = jiffies + (HZ*2);
        for (;;) {

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        map_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
                        DISABLE_VPP(map);
                        spin_unlock_bh(chip->mutex);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                spin_lock_bh(chip->mutex);
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        DISABLE_VPP(map);
        wake_up(&chip->wq);
        spin_unlock_bh(chip->mutex);
        return 0;
}

static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long adr;
        int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
        int ofs_factor = cfi->interleave * cfi->device_type;
#endif

        if (ofs & (mtd->erasesize - 1))
                return -EINVAL;

        if (len & (mtd->erasesize -1))
                return -EINVAL;

        if ((len + ofs) > mtd->size)
                return -EINVAL;

        chipnum = ofs >> cfi->chipshift;
        adr = ofs - (chipnum << cfi->chipshift);

        while(len) {

#ifdef DEBUG_LOCK_BITS
                cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
                printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
                cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

                ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
                cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
                printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
                cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

                if (ret)
                        return ret;

                adr += mtd->erasesize;
                len -= mtd->erasesize;

                if (adr >> cfi->chipshift) {
                        adr = 0;
                        chipnum++;

                        if (chipnum >= cfi->numchips)
                                break;
                }
        }
        return 0;
}
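
/*
 * Note the contrast with the erase path above: lock requests are
 * validated against the single largest erasesize only.  For example
 * (invented numbers), with mtd->erasesize == 64KiB, ofs and len must
 * both be multiples of 0x10000, and each 64KiB step locks one block
 * via do_lock_oneblock().
 */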

static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo = jiffies + HZ;
        DECLARE_WAITQUEUE(wait, current);

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        timeo = jiffies + HZ;
retry:
        spin_lock_bh(chip->mutex);

        /* Check that the chip's ready to talk to us. */
        switch (chip->state) {
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
        case FL_READY:
                map_write(map, CMD(0x70), adr);
                chip->state = FL_STATUS;
                /* Fall through: the status just requested is read below */

        case FL_STATUS:
                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* Urgh. Chip not yet ready to talk to us. */
                if (time_after(jiffies, timeo)) {
                        spin_unlock_bh(chip->mutex);
                        printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                goto retry;

        default:
                /* Stick ourselves on a wait queue to be woken when
                   someone changes the status */
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock_bh(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                timeo = jiffies + HZ;
                goto retry;
        }

        ENABLE_VPP(map);
        map_write(map, CMD(0x60), adr);
        map_write(map, CMD(0xD0), adr);
        chip->state = FL_UNLOCKING;

        spin_unlock_bh(chip->mutex);
        msleep(1000);
        spin_lock_bh(chip->mutex);

        /* FIXME. Use a timer to check this, and return immediately. */
        /* Once the state machine's known to be working I'll do that */

        timeo = jiffies + (HZ*2);
        for (;;) {

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        map_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
                        DISABLE_VPP(map);
                        spin_unlock_bh(chip->mutex);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock_bh(chip->mutex);
                cfi_udelay(1);
                spin_lock_bh(chip->mutex);
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        DISABLE_VPP(map);
        wake_up(&chip->wq);
        spin_unlock_bh(chip->mutex);
        return 0;
}

static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long adr;
        int chipnum, ret = 0;
#ifdef DEBUG_LOCK_BITS
        int ofs_factor = cfi->interleave * cfi->device_type;
#endif

        chipnum = ofs >> cfi->chipshift;
        adr = ofs - (chipnum << cfi->chipshift);

#ifdef DEBUG_LOCK_BITS
        {
                unsigned long temp_adr = adr;
                unsigned long temp_len = len;

                cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
                while (temp_len) {
                        printk("before unlock %lx: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
                        temp_adr += mtd->erasesize;
                        temp_len -= mtd->erasesize;
                }
                cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
        }
#endif

        ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);

#ifdef DEBUG_LOCK_BITS
        cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
        printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
        cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
#endif

        return ret;
}

static int cfi_staa_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

                spin_lock_bh(chip->mutex);

                switch(chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        chip->oldstate = chip->state;
                        chip->state = FL_PM_SUSPENDED;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                case FL_PM_SUSPENDED:
                        break;

                default:
                        ret = -EAGAIN;
                        break;
                }
                spin_unlock_bh(chip->mutex);
        }

        /* Unlock the chips again */

        if (ret) {
                for (i--; i >=0; i--) {
                        chip = &cfi->chips[i];

                        spin_lock_bh(chip->mutex);

                        if (chip->state == FL_PM_SUSPENDED) {
                                /* No need to force it into a known state here,
                                   because we're returning failure, and it didn't
                                   get power cycled */
                                chip->state = chip->oldstate;
                                wake_up(&chip->wq);
                        }
                        spin_unlock_bh(chip->mutex);
                }
        }

        return ret;
}

static void cfi_staa_resume(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;

        for (i=0; i<cfi->numchips; i++) {

                chip = &cfi->chips[i];

                spin_lock_bh(chip->mutex);

                /* Go to known state. Chip may have been power cycled */
                if (chip->state == FL_PM_SUSPENDED) {
                        map_write(map, CMD(0xFF), 0);
                        chip->state = FL_READY;
                        wake_up(&chip->wq);
                }

                spin_unlock_bh(chip->mutex);
        }
}

static void cfi_staa_destroy(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        kfree(cfi->cmdset_priv);
        kfree(cfi);
}

static char im_name[] = "cfi_cmdset_0020";

static int __init cfi_staa_init(void)
{
        inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
        return 0;
}

static void __exit cfi_staa_exit(void)
{
        inter_module_unregister(im_name);
}

module_init(cfi_staa_init);
module_exit(cfi_staa_exit);

MODULE_LICENSE("GPL");