/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.164 2004/11/16 18:29:00 dwmw2 Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* This is the Intel extended query table, not the AMD one */
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	}
}
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely the device ids are as
	 * well.  This table picks all the cases where
	 * we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);
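
	/*
	 * For v1.3 extended tables the total length isn't known up front:
	 * it depends on protection-register and partition-region fields
	 * inside the table itself.  So read with the current size guess,
	 * walk the variable part, and re-read with a larger buffer if the
	 * guess turns out to be short (the "need_more" path below).
	 */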
 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) * (4 + 6);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#if 0
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) * (4 + 6);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
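
		/*
		 * Worked example (assuming a power-of-two partition count,
		 * so __ffs(numparts) == log2(numparts)): chipshift 24
		 * (a 16 MiB chip) with numparts 4 gives partshift 22,
		 * i.e. four 4 MiB virtual chips.
		 */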

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also configuring MTD CFI
 * support to a single buswidth and a single interleave is recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	preempt_disable();
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
	preempt_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			preempt_enable();
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			preempt_disable();
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				preempt_enable();
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				preempt_disable();
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);
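
	/* e.g. with a 4-byte buswidth, adr 0x1002 is rounded down to
	   cmd_addr 0x1000 so commands always hit a full bus word */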

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
#if 0
static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
						loff_t from, size_t len,
						size_t *retlen,
						u_char *buf,
						int base_offst, int reg_sz)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int count = len;
	int chip_num, offst;
	int ret;

	chip_num = ((unsigned int)from/reg_sz);
	offst = from - (reg_sz*chip_num)+base_offst;

	while (count) {
		/* Calculate which chip & protection register offset we need */

		if (chip_num >= cfi->numchips)
			goto out;

		chip = &cfi->chips[chip_num];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
		if (ret) {
			spin_unlock(chip->mutex);
			return (len-count)?:ret;
		}

		xip_disable(map, chip, chip->start);

		if (chip->state != FL_JEDEC_QUERY) {
			map_write(map, CMD(0x90), chip->start);
			chip->state = FL_JEDEC_QUERY;
		}

		while (count && ((offst-base_offst) < reg_sz)) {
			*buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
			buf++;
			offst++;
			count--;
		}

		xip_enable(map, chip, chip->start);
		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		/* Move on to the next chip */
		chip_num++;
		offst = base_offst;
	}

 out:
	return len-count;
}

static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;

	/* Check that we actually have some protection registers */
	if(!extp || !(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=(1<<extp->FactProtRegSize);
	reg_sz=(1<<extp->UserProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;

	/* Check that we actually have some protection registers */
	if(!extp || !(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=0;
	reg_sz=(1<<extp->FactProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
#endif
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
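
	/* CMD() replicates the value across the interleave: for example,
	   two 16-bit chips on a 32-bit bus turn CMD(0x80) into 0x00800080,
	   so the ready bit is checked on every chip at once */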

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, CMD(0x40), adr);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);
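
	/* Example: two interleaved chips with 32-byte write buffers each
	   (MaxBufWriteSize == 5) give wbufsize 64, so cmd_adr is adr
	   rounded down to a 64-byte write-buffer boundary */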

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr );
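	/* the chip expects the number of bus words to program minus one;
	   a trailing partial word counts as one more word, hence the
	   "- !bytes" above */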

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		z++;
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
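		/* (-ofs) & (bankwidth-1) is the byte distance to the next
		   bus-aligned boundary, e.g. ofs 0x1003 with bankwidth 4
		   gives local_len 1 */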
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, adr, chip->erase_time*1000/2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					/* chip i's status sits device_type*8*i
					   bits up the bus word; shift each
					   one down before merging */
					chipstatus |= status.x[w] >> (cfi->device_type * 8 * i);
1798 printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1799 status.x[0], chipstatus);
1802 if ((chipstatus & 0x30) == 0x30) {
1803 printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
1804 ret = -EIO;
1805 } else if (chipstatus & 0x02) {
1806 /* Protection bit set */
1807 ret = -EROFS;
1808 } else if (chipstatus & 0x8) {
1809 /* Voltage */
1810 printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
1811 ret = -EIO;
1812 } else if (chipstatus & 0x20) {
1813 if (retries--) {
1814 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
1815 timeo = jiffies + HZ;
1816 put_chip(map, chip, adr);
1817 spin_unlock(chip->mutex);
1818 goto retry;
1820 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
1821 ret = -EIO;
1823 } else {
1824 xip_enable(map, chip, adr);
1825 ret = 0;
1828 out: put_chip(map, chip, adr);
1829 spin_unlock(chip->mutex);
1830 return ret;
int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	spin_unlock(chip->mutex);
	UDELAY(map, chip, adr, 1000000/HZ);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");