/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_MACRONIX	0x00C2
#define MANUFACTURER_SST	0x00BF
#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == MANUFACTURER_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}
/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}
static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}
static void fixup_M29W128G_write_buffer(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_warning("Don't use write buffer on ST flash M29W128G\n");
		cfi->cfiq->BufWriteTimeoutTyp = 0;
	}
}
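
/*
 * Editor's note on the tables below: cfi_fixup() (from <linux/mtd/cfi.h>)
 * walks a table and invokes every entry whose manufacturer/device ID pair
 * matches the probed chip, so the workarounds above are applied only to
 * the parts that need them.
 */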
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ MANUFACTURER_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_ST, 0x227E, fixup_M29W128G_write_buffer, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		cfi_fixup_major_minor(cfi, extp);

		if (extp->MajorVersion != '1' ||
		    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
			printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
			       "version %c.%c.\n", extp->MajorVersion,
			       extp->MinorVersion);
			kfree(extp);
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Apply cfi device specific fixups */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		bootloc = extp->TopBottom;
		if ((bootloc != 2) && (bootloc != 3)) {
			printk(KERN_WARNING "%s: CFI does not contain boot "
			       "bank location. Assuming top.\n", map->name);
			bootloc = 2;
		}

		if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
			printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);

			for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
				int j = (cfi->cfiq->NumEraseRegions-1)-i;
				__u32 swap;

				swap = cfi->cfiq->EraseRegionInfo[i];
				cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
				cfi->cfiq->EraseRegionInfo[j] = swap;
			}
		}
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}
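
	/*
	 * Editor's note: per the CFI spec, each 32-bit EraseRegionInfo entry
	 * encodes the region geometry: low 16 bits = number of blocks - 1,
	 * high 16 bits = block size in units of 256 bytes (hence the
	 * ">> 8" / "& ~0xff" arithmetic below).
	 */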
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	/* FIXME: erase-suspend-program is broken. See
	   http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
	printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
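
/*
 * Editor's note: the status polling below relies on the AMD-style "toggle
 * bit".  While an embedded program/erase algorithm is running, successive
 * reads of the same address return DQ6 (and DQ2) alternating, so two reads
 * that compare equal mean the chip is back in (or never left) read mode.
 * This is the simplest check that also works for interleaved chips.
 */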
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
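
/*
 * Editor's note: get_chip() is entered with chip->mutex held; it may drop
 * and re-take the mutex while waiting, and may suspend a running erase to
 * service the request.  put_chip() undoes get_chip(): it resumes a
 * suspended erase and wakes up anyone waiting on the chip.
 */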
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				spin_unlock(chip->mutex);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
			goto sleep;
		if (!(   mode == FL_READY
		      || mode == FL_POINT
		      || !cfip
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
		      || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)
			  && chip->in_progress_block_addr == adr)))
			goto sleep;
		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we were
				 * trying to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			spin_unlock(chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	spin_unlock(chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	spin_lock(chip->mutex);  \
} while (0)

#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	spin_lock(chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;
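
	/* Editor's note: enter the SecSi (security/OTP) sector with the
	 * standard two-cycle unlock followed by the 0x88 entry command;
	 * the exit sequence further down (unlock + 0x90, then 0x00)
	 * returns the chip to the normal array. */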
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	spin_unlock(chip->mutex);

	return 0;
}
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum=from>>3;
	ofs=from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip doesn't have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
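	/* Editor's note: standard AMD single-word program sequence: two
	 * unlock cycles (0xAA to addr_unlock1, 0x55 to addr_unlock2), the
	 * 0xA0 program command, then the datum written to its target
	 * address. */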
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		spin_lock(cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			spin_unlock(cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		spin_unlock(cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
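
	/* Editor's note: AMD write-buffer programming: after the two unlock
	 * cycles, 0x25 (Write to Buffer) goes to the sector address, then a
	 * word count of (words - 1), the data words themselves, and finally
	 * the 0x29 confirm command to start programming. */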
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
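
/*
 * Editor's note: the top-level buffered write below splits the request at
 * write-buffer boundaries (wbufsize = interleave << MaxBufWriteSize bytes)
 * and falls back to word-at-a-time writes for any unaligned head or
 * trailing fragment.
 */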
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
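
	/* Editor's note: six-cycle chip erase: unlock, 0x80 (erase setup),
	 * unlock again, then 0x10 for full-chip erase. */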
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
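
	/* Editor's note: same six-cycle sequence as chip erase, except the
	 * final cycle is 0x30 written to the address of the sector to be
	 * erased. */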
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
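
/*
 * Editor's note: Atmel sector protection (AT49BV6416 and friends): locking
 * uses the AMD unlock + 0x80 setup prefix ending in 0x40 written to the
 * sector address; unlocking is a single 0xAA unlock cycle followed by 0x70
 * at the sector address.  Both are applied per sector via
 * cfi_varsize_frob().
 */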
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

 out_unlock:
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock(chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock(chip->mutex);
	}
}
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");