[MTD] [NOR] More CFI fixups for Atmel chips
[linux-2.6/verdex.git] drivers/mtd/chips/cfi_cmdset_0001.c
1 /*
2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
18 * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
19 * - auto unlock sectors on resume for auto locking flash on power up
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/init.h>
27 #include <asm/io.h>
28 #include <asm/byteorder.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
35 #include <linux/bitmap.h>
36 #include <linux/mtd/xip.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/compatmac.h>
40 #include <linux/mtd/cfi.h>
42 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
43 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
45 // debugging, turns off buffer write mode if set to 1
46 #define FORCE_WORD_WRITE 0
48 #define MANUFACTURER_INTEL 0x0089
49 #define I82802AB 0x00ad
50 #define I82802AC 0x00ac
51 #define MANUFACTURER_ST 0x0020
52 #define M50LPW080 0x002F
53 #define AT49BV640D 0x02de
55 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
56 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
57 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
58 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
59 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
60 static void cfi_intelext_sync (struct mtd_info *);
61 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
62 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
63 #ifdef CONFIG_MTD_OTP
64 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
65 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
66 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
67 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
68 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
69 struct otp_info *, size_t);
70 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
71 struct otp_info *, size_t);
72 #endif
73 static int cfi_intelext_suspend (struct mtd_info *);
74 static void cfi_intelext_resume (struct mtd_info *);
75 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
77 static void cfi_intelext_destroy(struct mtd_info *);
79 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
81 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
82 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
84 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
85 size_t *retlen, u_char **mtdbuf);
86 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
87 size_t len);
89 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
90 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
91 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
92 #include "fwh_lock.h"
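/* fwh_lock.h supplies the FWH (firmware hub) locking helpers, including
 * fixup_use_fwh_lock() referenced by jedec_fixup_table below. */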
97 * *********** SETUP AND PROBE BITS ***********
100 static struct mtd_chip_driver cfi_intelext_chipdrv = {
101 .probe = NULL, /* Not usable directly */
102 .destroy = cfi_intelext_destroy,
103 .name = "cfi_cmdset_0001",
104 .module = THIS_MODULE
107 /* #define DEBUG_LOCK_BITS */
108 /* #define DEBUG_CFI_FEATURES */
110 #ifdef DEBUG_CFI_FEATURES
111 static void cfi_tell_features(struct cfi_pri_intelext *extp)
113 int i;
114 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
115 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
116 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
117 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
118 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
119 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
120 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
121 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
122 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
123 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
124 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
125 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
126 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
127 for (i=11; i<32; i++) {
128 if (extp->FeatureSupport & (1<<i))
129 printk(" - Unknown Bit %X: supported\n", i);
132 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
133 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
134 for (i=1; i<8; i++) {
135 if (extp->SuspendCmdSupport & (1<<i))
136 printk(" - Unknown Bit %X: supported\n", i);
139 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
140 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
141 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
142 for (i=2; i<3; i++) {
143 if (extp->BlkStatusRegMask & (1<<i))
144 printk(" - Unknown Bit %X Active: yes\n",i);
146 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
147 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
148 for (i=6; i<16; i++) {
149 if (extp->BlkStatusRegMask & (1<<i))
150 printk(" - Unknown Bit %X Active: yes\n",i);
153 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
154 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
155 if (extp->VppOptimal)
156 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
157 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
159 #endif
161 /* Atmel chips don't use the same PRI format as Intel chips */
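/*
 * Translate the Atmel PRI layout into the Intel one expected by the rest
 * of this driver: undo the little-endian conversion done by
 * read_pri_intelext() so the raw bytes line up, copy them into a
 * struct cfi_pri_atmel, clear the Intel-specific fields, then rebuild
 * FeatureSupport from the Atmel feature bits. Buffered writes are not
 * supported on these parts, so the buffer write timeouts are zeroed.
 */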
162 static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
164 struct map_info *map = mtd->priv;
165 struct cfi_private *cfi = map->fldrv_priv;
166 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
167 struct cfi_pri_atmel atmel_pri;
168 uint32_t features = 0;
170 /* Reverse byteswapping */
171 extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
172 extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
173 extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
175 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
176 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
178 printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
180 if (atmel_pri.Features & 0x01) /* chip erase supported */
181 features |= (1<<0);
182 if (atmel_pri.Features & 0x02) /* erase suspend supported */
183 features |= (1<<1);
184 if (atmel_pri.Features & 0x04) /* program suspend supported */
185 features |= (1<<2);
186 if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
187 features |= (1<<9);
188 if (atmel_pri.Features & 0x20) /* page mode read supported */
189 features |= (1<<7);
190 if (atmel_pri.Features & 0x40) /* queued erase supported */
191 features |= (1<<4);
192 if (atmel_pri.Features & 0x80) /* Protection bits supported */
193 features |= (1<<6);
195 extp->FeatureSupport = features;
197 /* burst write mode not supported */
198 cfi->cfiq->BufWriteTimeoutTyp = 0;
199 cfi->cfiq->BufWriteTimeoutMax = 0;
202 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
203 /* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
204 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
206 struct map_info *map = mtd->priv;
207 struct cfi_private *cfi = map->fldrv_priv;
208 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
210 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
211 "erase on write disabled.\n");
212 extp->SuspendCmdSupport &= ~1;
214 #endif
216 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
217 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
219 struct map_info *map = mtd->priv;
220 struct cfi_private *cfi = map->fldrv_priv;
221 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
223 if (cfip && (cfip->FeatureSupport&4)) {
224 cfip->FeatureSupport &= ~4;
225 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
228 #endif
230 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
232 struct map_info *map = mtd->priv;
233 struct cfi_private *cfi = map->fldrv_priv;
235 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
236 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
239 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
241 struct map_info *map = mtd->priv;
242 struct cfi_private *cfi = map->fldrv_priv;
244 /* Note this is done after the region info is endian swapped */
245 cfi->cfiq->EraseRegionInfo[1] =
246 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
249 static void fixup_use_point(struct mtd_info *mtd, void *param)
251 struct map_info *map = mtd->priv;
252 if (!mtd->point && map_is_linear(map)) {
253 mtd->point = cfi_intelext_point;
254 mtd->unpoint = cfi_intelext_unpoint;
258 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
260 struct map_info *map = mtd->priv;
261 struct cfi_private *cfi = map->fldrv_priv;
262 if (cfi->cfiq->BufWriteTimeoutTyp) {
263 printk(KERN_INFO "Using buffer write method\n" );
264 mtd->write = cfi_intelext_write_buffers;
265 mtd->writev = cfi_intelext_writev;
270 * Some chips power-up with all sectors locked by default.
272 static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
274 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
275 mtd->flags |= MTD_STUPID_LOCK;
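/*
 * Fixup tables: cfi_fixup() walks these at probe time and calls every
 * entry whose manufacturer and device IDs match the chip (CFI_MFR_ANY
 * and CFI_ID_ANY act as wildcards), giving us a chance to patch the
 * CFI/PRI data before the MTD device is set up.
 */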
278 static struct cfi_fixup cfi_fixup_table[] = {
279 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
280 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
281 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
282 #endif
283 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
284 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
285 #endif
286 #if !FORCE_WORD_WRITE
287 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
288 #endif
289 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
290 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
291 { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
292 { 0, 0, NULL, NULL }
295 static struct cfi_fixup jedec_fixup_table[] = {
296 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
297 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
298 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
299 { 0, 0, NULL, NULL }
301 static struct cfi_fixup fixup_table[] = {
302 /* The CFI vendor IDs and the JEDEC vendor IDs appear
303 * to be common. It is likely that the device IDs are as
304 * well. This table picks up all the cases where we know
305 * that is the case.
307 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
308 { 0, 0, NULL, NULL }
311 static inline struct cfi_pri_intelext *
312 read_pri_intelext(struct map_info *map, __u16 adr)
314 struct cfi_pri_intelext *extp;
315 unsigned int extp_size = sizeof(*extp);
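/*
 * The extended query table is variable length: version 1.3+ appends OTP,
 * burst read and partition region descriptors after the fixed part. Read
 * the fixed part first, work out how much extra data the chip advertises,
 * and if our buffer was too small re-read the whole thing with the larger
 * size (bailing out if it claims to be bigger than 4KiB).
 */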
317 again:
318 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
319 if (!extp)
320 return NULL;
322 if (extp->MajorVersion != '1' ||
323 (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
324 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
325 "version %c.%c.\n", extp->MajorVersion,
326 extp->MinorVersion);
327 kfree(extp);
328 return NULL;
331 /* Do some byteswapping if necessary */
332 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
333 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
334 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
336 if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
337 unsigned int extra_size = 0;
338 int nb_parts, i;
340 /* Protection Register info */
341 extra_size += (extp->NumProtectionFields - 1) *
342 sizeof(struct cfi_intelext_otpinfo);
344 /* Burst Read info */
345 extra_size += 2;
346 if (extp_size < sizeof(*extp) + extra_size)
347 goto need_more;
348 extra_size += extp->extra[extra_size-1];
350 /* Number of hardware-partitions */
351 extra_size += 1;
352 if (extp_size < sizeof(*extp) + extra_size)
353 goto need_more;
354 nb_parts = extp->extra[extra_size - 1];
356 /* skip the sizeof(partregion) field in CFI 1.4 */
357 if (extp->MinorVersion >= '4')
358 extra_size += 2;
360 for (i = 0; i < nb_parts; i++) {
361 struct cfi_intelext_regioninfo *rinfo;
362 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
363 extra_size += sizeof(*rinfo);
364 if (extp_size < sizeof(*extp) + extra_size)
365 goto need_more;
366 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
367 extra_size += (rinfo->NumBlockTypes - 1)
368 * sizeof(struct cfi_intelext_blockinfo);
371 if (extp->MinorVersion >= '4')
372 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
374 if (extp_size < sizeof(*extp) + extra_size) {
375 need_more:
376 extp_size = sizeof(*extp) + extra_size;
377 kfree(extp);
378 if (extp_size > 4096) {
379 printk(KERN_ERR
380 "%s: cfi_pri_intelext is too fat\n",
381 __FUNCTION__);
382 return NULL;
384 goto again;
388 return extp;
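/*
 * Primary entry point, called by the CFI/JEDEC probe code for chips that
 * advertise the Intel/Sharp extended command set. Command sets 0x0003
 * and 0x0200 are handled identically via the aliases declared below.
 */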
391 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
393 struct cfi_private *cfi = map->fldrv_priv;
394 struct mtd_info *mtd;
395 int i;
397 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
398 if (!mtd) {
399 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
400 return NULL;
402 mtd->priv = map;
403 mtd->type = MTD_NORFLASH;
405 /* Fill in the default mtd operations */
406 mtd->erase = cfi_intelext_erase_varsize;
407 mtd->read = cfi_intelext_read;
408 mtd->write = cfi_intelext_write_words;
409 mtd->sync = cfi_intelext_sync;
410 mtd->lock = cfi_intelext_lock;
411 mtd->unlock = cfi_intelext_unlock;
412 mtd->suspend = cfi_intelext_suspend;
413 mtd->resume = cfi_intelext_resume;
414 mtd->flags = MTD_CAP_NORFLASH;
415 mtd->name = map->name;
416 mtd->writesize = 1;
418 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
420 if (cfi->cfi_mode == CFI_MODE_CFI) {
422 * It's a real CFI chip, not one for which the probe
423 * routine faked a CFI structure. So we read the feature
424 * table from it.
426 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
427 struct cfi_pri_intelext *extp;
429 extp = read_pri_intelext(map, adr);
430 if (!extp) {
431 kfree(mtd);
432 return NULL;
435 /* Install our own private info structure */
436 cfi->cmdset_priv = extp;
438 cfi_fixup(mtd, cfi_fixup_table);
440 #ifdef DEBUG_CFI_FEATURES
441 /* Tell the user about it in lots of lovely detail */
442 cfi_tell_features(extp);
443 #endif
445 if(extp->SuspendCmdSupport & 1) {
446 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
449 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
450 /* Apply jedec specific fixups */
451 cfi_fixup(mtd, jedec_fixup_table);
453 /* Apply generic fixups */
454 cfi_fixup(mtd, fixup_table);
456 for (i=0; i< cfi->numchips; i++) {
457 if (cfi->cfiq->WordWriteTimeoutTyp)
458 cfi->chips[i].word_write_time =
459 1<<cfi->cfiq->WordWriteTimeoutTyp;
460 else
461 cfi->chips[i].word_write_time = 50000;
463 if (cfi->cfiq->BufWriteTimeoutTyp)
464 cfi->chips[i].buffer_write_time =
465 1<<cfi->cfiq->BufWriteTimeoutTyp;
466 /* No default; if it isn't specified, we won't use it */
468 if (cfi->cfiq->BlockEraseTimeoutTyp)
469 cfi->chips[i].erase_time =
470 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
471 else
472 cfi->chips[i].erase_time = 2000000;
474 cfi->chips[i].ref_point_counter = 0;
475 init_waitqueue_head(&(cfi->chips[i].wq));
478 map->fldrv = &cfi_intelext_chipdrv;
480 return cfi_intelext_setup(mtd);
482 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
483 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
484 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
485 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
486 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
488 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
490 struct map_info *map = mtd->priv;
491 struct cfi_private *cfi = map->fldrv_priv;
492 unsigned long offset = 0;
493 int i,j;
494 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
496 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
498 mtd->size = devsize * cfi->numchips;
500 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
501 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
502 * mtd->numeraseregions, GFP_KERNEL);
503 if (!mtd->eraseregions) {
504 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
505 goto setup_err;
508 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
509 unsigned long ernum, ersize;
510 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
511 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
513 if (mtd->erasesize < ersize) {
514 mtd->erasesize = ersize;
516 for (j=0; j<cfi->numchips; j++) {
517 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
518 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
519 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
520 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
522 offset += (ersize * ernum);
525 if (offset != devsize) {
526 /* Argh */
527 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
528 goto setup_err;
531 for (i=0; i<mtd->numeraseregions;i++){
532 printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
533 i,mtd->eraseregions[i].offset,
534 mtd->eraseregions[i].erasesize,
535 mtd->eraseregions[i].numblocks);
538 #ifdef CONFIG_MTD_OTP
539 mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
540 mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
541 mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
542 mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
543 mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
544 mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
545 #endif
547 /* This function has the potential to distort the reality
548 a bit and therefore should be called last. */
549 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
550 goto setup_err;
552 __module_get(THIS_MODULE);
553 register_reboot_notifier(&mtd->reboot_notifier);
554 return mtd;
556 setup_err:
557 if(mtd) {
558 kfree(mtd->eraseregions);
559 kfree(mtd);
561 kfree(cfi->cmdset_priv);
562 return NULL;
565 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
566 struct cfi_private **pcfi)
568 struct map_info *map = mtd->priv;
569 struct cfi_private *cfi = *pcfi;
570 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
573 * Probing of multi-partition flash chips.
575 * To support multiple partitions when available, we simply arrange
576 * for each of them to have their own flchip structure even if they
577 * are on the same physical chip. This means completely recreating
578 * a new cfi_private structure right here which is a blatant code
579 * layering violation, but this is still the least intrusive
580 * arrangement at this point. This can be rearranged in the future
581 * if someone feels motivated enough. --nico
583 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
584 && extp->FeatureSupport & (1 << 9)) {
585 struct cfi_private *newcfi;
586 struct flchip *chip;
587 struct flchip_shared *shared;
588 int offs, numregions, numparts, partshift, numvirtchips, i, j;
590 /* Protection Register info */
591 offs = (extp->NumProtectionFields - 1) *
592 sizeof(struct cfi_intelext_otpinfo);
594 /* Burst Read info */
595 offs += extp->extra[offs+1]+2;
597 /* Number of partition regions */
598 numregions = extp->extra[offs];
599 offs += 1;
601 /* skip the sizeof(partregion) field in CFI 1.4 */
602 if (extp->MinorVersion >= '4')
603 offs += 2;
605 /* Number of hardware partitions */
606 numparts = 0;
607 for (i = 0; i < numregions; i++) {
608 struct cfi_intelext_regioninfo *rinfo;
609 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
610 numparts += rinfo->NumIdentPartitions;
611 offs += sizeof(*rinfo)
612 + (rinfo->NumBlockTypes - 1) *
613 sizeof(struct cfi_intelext_blockinfo);
616 /* Programming Region info */
617 if (extp->MinorVersion >= '4') {
618 struct cfi_intelext_programming_regioninfo *prinfo;
619 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
620 mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
621 mtd->flags &= ~MTD_BIT_WRITEABLE;
622 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
623 map->name, mtd->writesize,
624 cfi->interleave * prinfo->ControlValid,
625 cfi->interleave * prinfo->ControlInvalid);
629 * All functions below currently rely on all chips having
630 * the same geometry so we'll just assume that all hardware
631 * partitions are of the same size too.
633 partshift = cfi->chipshift - __ffs(numparts);
635 if ((1 << partshift) < mtd->erasesize) {
636 printk( KERN_ERR
637 "%s: bad number of hw partitions (%d)\n",
638 __FUNCTION__, numparts);
639 return -EINVAL;
642 numvirtchips = cfi->numchips * numparts;
643 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
644 if (!newcfi)
645 return -ENOMEM;
646 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
647 if (!shared) {
648 kfree(newcfi);
649 return -ENOMEM;
651 memcpy(newcfi, cfi, sizeof(struct cfi_private));
652 newcfi->numchips = numvirtchips;
653 newcfi->chipshift = partshift;
655 chip = &newcfi->chips[0];
656 for (i = 0; i < cfi->numchips; i++) {
657 shared[i].writing = shared[i].erasing = NULL;
658 spin_lock_init(&shared[i].lock);
659 for (j = 0; j < numparts; j++) {
660 *chip = cfi->chips[i];
661 chip->start += j << partshift;
662 chip->priv = &shared[i];
663 /* those should be reset too since
664 they create memory references. */
665 init_waitqueue_head(&chip->wq);
666 spin_lock_init(&chip->_spinlock);
667 chip->mutex = &chip->_spinlock;
668 chip++;
672 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
673 "--> %d partitions of %d KiB\n",
674 map->name, cfi->numchips, cfi->interleave,
675 newcfi->numchips, 1<<(newcfi->chipshift-10));
677 map->fldrv_priv = newcfi;
678 *pcfi = newcfi;
679 kfree(cfi);
682 return 0;
686 * *********** CHIP ACCESS FUNCTIONS ***********
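/*
 * Wait for the chip to become ready for the requested operation, called
 * with chip->mutex held. If an erase is in progress and the chip can
 * suspend it, the erase is suspended so the new operation may proceed.
 * Returns 0 when ready, -EAGAIN when the caller should re-examine the
 * chip state and retry, or sleeps on the chip's wait queue otherwise.
 */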
688 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
690 DECLARE_WAITQUEUE(wait, current);
691 struct cfi_private *cfi = map->fldrv_priv;
692 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
693 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
694 unsigned long timeo = jiffies + HZ;
696 switch (chip->state) {
698 case FL_STATUS:
699 for (;;) {
700 status = map_read(map, adr);
701 if (map_word_andequal(map, status, status_OK, status_OK))
702 break;
704 /* At this point we're fine with write operations
705 in other partitions as they don't conflict. */
706 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
707 break;
709 spin_unlock(chip->mutex);
710 cfi_udelay(1);
711 spin_lock(chip->mutex);
712 /* Someone else might have been playing with it. */
713 return -EAGAIN;
716 case FL_READY:
717 case FL_CFI_QUERY:
718 case FL_JEDEC_QUERY:
719 return 0;
721 case FL_ERASING:
722 if (!cfip ||
723 !(cfip->FeatureSupport & 2) ||
724 !(mode == FL_READY || mode == FL_POINT ||
725 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
726 goto sleep;
729 /* Erase suspend */
730 map_write(map, CMD(0xB0), adr);
732 /* If the flash has finished erasing, then 'erase suspend'
733 * appears to make some (28F320) flash devices switch to
734 * 'read' mode. Make sure that we switch to 'read status'
735 * mode so we get the right data. --rmk
737 map_write(map, CMD(0x70), adr);
738 chip->oldstate = FL_ERASING;
739 chip->state = FL_ERASE_SUSPENDING;
740 chip->erase_suspended = 1;
741 for (;;) {
742 status = map_read(map, adr);
743 if (map_word_andequal(map, status, status_OK, status_OK))
744 break;
746 if (time_after(jiffies, timeo)) {
747 /* Urgh. Resume and pretend we weren't here. */
748 map_write(map, CMD(0xd0), adr);
749 /* Make sure we're in 'read status' mode if it had finished */
750 map_write(map, CMD(0x70), adr);
751 chip->state = FL_ERASING;
752 chip->oldstate = FL_READY;
753 printk(KERN_ERR "%s: Chip not ready after erase "
754 "suspended: status = 0x%lx\n", map->name, status.x[0]);
755 return -EIO;
758 spin_unlock(chip->mutex);
759 cfi_udelay(1);
760 spin_lock(chip->mutex);
761 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
762 So we can just loop here. */
764 chip->state = FL_STATUS;
765 return 0;
767 case FL_XIP_WHILE_ERASING:
768 if (mode != FL_READY && mode != FL_POINT &&
769 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
770 goto sleep;
771 chip->oldstate = chip->state;
772 chip->state = FL_READY;
773 return 0;
775 case FL_POINT:
776 /* Only if there's no operation suspended... */
777 if (mode == FL_READY && chip->oldstate == FL_READY)
778 return 0;
780 case FL_SHUTDOWN:
781 /* The machine is rebooting now, so no one can get at the chip anymore */
782 return -EIO;
783 default:
784 sleep:
785 set_current_state(TASK_UNINTERRUPTIBLE);
786 add_wait_queue(&chip->wq, &wait);
787 spin_unlock(chip->mutex);
788 schedule();
789 remove_wait_queue(&chip->wq, &wait);
790 spin_lock(chip->mutex);
791 return -EAGAIN;
795 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
797 int ret;
799 retry:
800 if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
801 || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
803 * OK. We have possibility for contention on the write/erase
804 * operations which are global to the real chip and not per
805 * partition. So let's fight it over in the partition which
806 * currently has authority on the operation.
808 * The rules are as follows:
810 * - any write operation must own shared->writing.
812 * - any erase operation must own _both_ shared->writing and
813 * shared->erasing.
815 * - contention arbitration is handled in the owner's context.
817 * The 'shared' struct can be read and/or written only when
818 * its lock is taken.
820 struct flchip_shared *shared = chip->priv;
821 struct flchip *contender;
822 spin_lock(&shared->lock);
823 contender = shared->writing;
824 if (contender && contender != chip) {
826 * The engine to perform desired operation on this
827 * partition is already in use by someone else.
828 * Let's fight over it in the context of the chip
829 * currently using it. If it is possible to suspend,
830 * that other partition will do just that, otherwise
831 * it'll happily send us to sleep. In any case, when
832 * get_chip returns success we're clear to go ahead.
834 ret = spin_trylock(contender->mutex);
835 spin_unlock(&shared->lock);
836 if (!ret)
837 goto retry;
838 spin_unlock(chip->mutex);
839 ret = chip_ready(map, contender, contender->start, mode);
840 spin_lock(chip->mutex);
842 if (ret == -EAGAIN) {
843 spin_unlock(contender->mutex);
844 goto retry;
846 if (ret) {
847 spin_unlock(contender->mutex);
848 return ret;
850 spin_lock(&shared->lock);
851 spin_unlock(contender->mutex);
854 /* We now own it */
855 shared->writing = chip;
856 if (mode == FL_ERASING)
857 shared->erasing = chip;
858 spin_unlock(&shared->lock);
860 ret = chip_ready(map, chip, adr, mode);
861 if (ret == -EAGAIN)
862 goto retry;
864 return ret;
867 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
869 struct cfi_private *cfi = map->fldrv_priv;
871 if (chip->priv) {
872 struct flchip_shared *shared = chip->priv;
873 spin_lock(&shared->lock);
874 if (shared->writing == chip && chip->oldstate == FL_READY) {
875 /* We own the ability to write, but we're done */
876 shared->writing = shared->erasing;
877 if (shared->writing && shared->writing != chip) {
878 /* give back ownership to who we loaned it from */
879 struct flchip *loaner = shared->writing;
880 spin_lock(loaner->mutex);
881 spin_unlock(&shared->lock);
882 spin_unlock(chip->mutex);
883 put_chip(map, loaner, loaner->start);
884 spin_lock(chip->mutex);
885 spin_unlock(loaner->mutex);
886 wake_up(&chip->wq);
887 return;
889 shared->erasing = NULL;
890 shared->writing = NULL;
891 } else if (shared->erasing == chip && shared->writing != chip) {
893 * We own the ability to erase without the ability
894 * to write, which means the erase was suspended
895 * and some other partition is currently writing.
896 * Don't let the switch below mess things up since
897 * we don't have ownership to resume anything.
899 spin_unlock(&shared->lock);
900 wake_up(&chip->wq);
901 return;
903 spin_unlock(&shared->lock);
906 switch(chip->oldstate) {
907 case FL_ERASING:
908 chip->state = chip->oldstate;
909 /* What if one interleaved chip has finished and the
910 other hasn't? The old code would leave the finished
911 one in READY mode. That's bad, and caused -EROFS
912 errors to be returned from do_erase_oneblock because
913 that's the only bit it checked for at the time.
914 As the state machine appears to explicitly allow
915 sending the 0x70 (Read Status) command to an erasing
916 chip and expecting it to be ignored, that's what we
917 do. */
918 map_write(map, CMD(0xd0), adr);
919 map_write(map, CMD(0x70), adr);
920 chip->oldstate = FL_READY;
921 chip->state = FL_ERASING;
922 break;
924 case FL_XIP_WHILE_ERASING:
925 chip->state = chip->oldstate;
926 chip->oldstate = FL_READY;
927 break;
929 case FL_READY:
930 case FL_STATUS:
931 case FL_JEDEC_QUERY:
932 /* We should really make set_vpp() count, rather than doing this */
933 DISABLE_VPP(map);
934 break;
935 default:
936 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
938 wake_up(&chip->wq);
941 #ifdef CONFIG_MTD_XIP
944 * No interrupt whatsoever can be serviced while the flash isn't in array
945 * mode. This is ensured by the xip_disable() and xip_enable() functions
946 * enclosing any code path where the flash is known not to be in array mode.
947 * Within an XIP-disabled code path, only functions marked with __xipram
948 * may be called and nothing else (it's a good idea to inspect the generated
949 * assembly to make sure inline functions were actually inlined and that gcc
950 * didn't emit calls to its own support functions). Configuring MTD CFI
951 * support for a single buswidth and a single interleave is also recommended.
954 static void xip_disable(struct map_info *map, struct flchip *chip,
955 unsigned long adr)
957 /* TODO: chips with no XIP use should ignore and return */
958 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
959 local_irq_disable();
962 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
963 unsigned long adr)
965 struct cfi_private *cfi = map->fldrv_priv;
966 if (chip->state != FL_POINT && chip->state != FL_READY) {
967 map_write(map, CMD(0xff), adr);
968 chip->state = FL_READY;
970 (void) map_read(map, adr);
971 xip_iprefetch();
972 local_irq_enable();
976 * When a delay is required for the flash operation to complete, the
977 * xip_wait_for_operation() function polls for both the given timeout and
978 * for pending (but still masked) hardware interrupts. Whenever an
979 * interrupt is pending, the flash erase or write operation is suspended,
980 * array mode is restored and interrupts are unmasked. Task scheduling might
981 * also happen at that point. The CPU eventually returns from the interrupt
982 * or the call to schedule() and the suspended flash operation is resumed for
983 * the remainder of the delay period.
985 * Warning: this function _will_ fool interrupt latency tracing tools.
988 static int __xipram xip_wait_for_operation(
989 struct map_info *map, struct flchip *chip,
990 unsigned long adr, unsigned int chip_op_time )
992 struct cfi_private *cfi = map->fldrv_priv;
993 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
994 map_word status, OK = CMD(0x80);
995 unsigned long usec, suspended, start, done;
996 flstate_t oldstate, newstate;
998 start = xip_currtime();
999 usec = chip_op_time * 8;
1000 if (usec == 0)
1001 usec = 500000;
1002 done = 0;
1004 do {
1005 cpu_relax();
1006 if (xip_irqpending() && cfip &&
1007 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1008 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1009 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1011 * Let's suspend the erase or write operation when
1012 * supported. Note that we currently don't try to
1013 * suspend interleaved chips if there is already
1014 * another operation suspended (imagine what happens
1015 * when one chip was already done with the current
1016 * operation while another chip suspended it, then
1017 * we resume the whole thing at once). Yes, it
1018 * can happen!
1020 usec -= done;
1021 map_write(map, CMD(0xb0), adr);
1022 map_write(map, CMD(0x70), adr);
1023 suspended = xip_currtime();
1024 do {
1025 if (xip_elapsed_since(suspended) > 100000) {
1027 * The chip doesn't want to suspend
1028 * after waiting for 100 msecs.
1029 * This is a critical error but there
1030 * is not much we can do here.
1032 return -EIO;
1034 status = map_read(map, adr);
1035 } while (!map_word_andequal(map, status, OK, OK));
1037 /* Suspend succeeded */
1038 oldstate = chip->state;
1039 if (oldstate == FL_ERASING) {
1040 if (!map_word_bitsset(map, status, CMD(0x40)))
1041 break;
1042 newstate = FL_XIP_WHILE_ERASING;
1043 chip->erase_suspended = 1;
1044 } else {
1045 if (!map_word_bitsset(map, status, CMD(0x04)))
1046 break;
1047 newstate = FL_XIP_WHILE_WRITING;
1048 chip->write_suspended = 1;
1050 chip->state = newstate;
1051 map_write(map, CMD(0xff), adr);
1052 (void) map_read(map, adr);
1053 asm volatile (".rep 8; nop; .endr");
1054 local_irq_enable();
1055 spin_unlock(chip->mutex);
1056 asm volatile (".rep 8; nop; .endr");
1057 cond_resched();
1060 * We're back. However someone else might have
1061 * decided to go write to the chip if we are in
1062 * a suspended erase state. If so let's wait
1063 * until it's done.
1065 spin_lock(chip->mutex);
1066 while (chip->state != newstate) {
1067 DECLARE_WAITQUEUE(wait, current);
1068 set_current_state(TASK_UNINTERRUPTIBLE);
1069 add_wait_queue(&chip->wq, &wait);
1070 spin_unlock(chip->mutex);
1071 schedule();
1072 remove_wait_queue(&chip->wq, &wait);
1073 spin_lock(chip->mutex);
1075 /* Disallow XIP again */
1076 local_irq_disable();
1078 /* Resume the write or erase operation */
1079 map_write(map, CMD(0xd0), adr);
1080 map_write(map, CMD(0x70), adr);
1081 chip->state = oldstate;
1082 start = xip_currtime();
1083 } else if (usec >= 1000000/HZ) {
1085 * Try to save on CPU power when the waiting delay
1086 * is at least a system timer tick period.
1087 * No need to be extremely accurate here.
1089 xip_cpu_idle();
1091 status = map_read(map, adr);
1092 done = xip_elapsed_since(start);
1093 } while (!map_word_andequal(map, status, OK, OK)
1094 && done < usec);
1096 return (done >= usec) ? -ETIME : 0;
1100 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1101 * the flash is actively programming or erasing since we have to poll for
1102 * the operation to complete anyway. We can't do that in a generic way with
1103 * an XIP setup, so we do it before the actual flash operation in this case
1104 * and stub it out from INVAL_CACHE_AND_WAIT.
1106 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1107 INVALIDATE_CACHED_RANGE(map, from, size)
1109 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
1110 xip_wait_for_operation(map, chip, cmd_adr, usec)
1112 #else
1114 #define xip_disable(map, chip, adr)
1115 #define xip_enable(map, chip, adr)
1116 #define XIP_INVAL_CACHED_RANGE(x...)
1117 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
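/*
 * Non-XIP completion path: invalidate any cached copy of the range being
 * modified, then poll the status register until the operation finishes.
 * The first wait is roughly half the typical operation time (sleeping if
 * that is at least a timer tick, busy-waiting otherwise) and the whole
 * thing times out after eight times the typical operation time.
 */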
1119 static int inval_cache_and_wait_for_operation(
1120 struct map_info *map, struct flchip *chip,
1121 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1122 unsigned int chip_op_time)
1124 struct cfi_private *cfi = map->fldrv_priv;
1125 map_word status, status_OK = CMD(0x80);
1126 int chip_state = chip->state;
1127 unsigned int timeo, sleep_time;
1129 spin_unlock(chip->mutex);
1130 if (inval_len)
1131 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1132 spin_lock(chip->mutex);
1134 /* set our timeout to 8 times the expected delay */
1135 timeo = chip_op_time * 8;
1136 if (!timeo)
1137 timeo = 500000;
1138 sleep_time = chip_op_time / 2;
1140 for (;;) {
1141 status = map_read(map, cmd_adr);
1142 if (map_word_andequal(map, status, status_OK, status_OK))
1143 break;
1145 if (!timeo) {
1146 map_write(map, CMD(0x70), cmd_adr);
1147 chip->state = FL_STATUS;
1148 return -ETIME;
1151 /* OK Still waiting. Drop the lock, wait a while and retry. */
1152 spin_unlock(chip->mutex);
1153 if (sleep_time >= 1000000/HZ) {
1155 * Half of the normal delay still remaining
1156 * can be performed with a sleeping delay instead
1157 * of busy waiting.
1159 msleep(sleep_time/1000);
1160 timeo -= sleep_time;
1161 sleep_time = 1000000/HZ;
1162 } else {
1163 udelay(1);
1164 cond_resched();
1165 timeo--;
1167 spin_lock(chip->mutex);
1169 while (chip->state != chip_state) {
1170 /* Someone's suspended the operation: sleep */
1171 DECLARE_WAITQUEUE(wait, current);
1172 set_current_state(TASK_UNINTERRUPTIBLE);
1173 add_wait_queue(&chip->wq, &wait);
1174 spin_unlock(chip->mutex);
1175 schedule();
1176 remove_wait_queue(&chip->wq, &wait);
1177 spin_lock(chip->mutex);
1181 /* Done and happy. */
1182 chip->state = FL_STATUS;
1183 return 0;
1186 #endif
1188 #define WAIT_TIMEOUT(map, chip, adr, udelay) \
1189 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
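/*
 * "point" support: hand the caller a pointer straight into the memory
 * mapped flash so it can read without an intermediate copy. This is only
 * hooked up for linear maps (see fixup_use_point) and is reference counted
 * per chip via ref_point_counter so nested point/unpoint pairs work.
 */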
1192 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1194 unsigned long cmd_addr;
1195 struct cfi_private *cfi = map->fldrv_priv;
1196 int ret = 0;
1198 adr += chip->start;
1200 /* Ensure cmd read/writes are aligned. */
1201 cmd_addr = adr & ~(map_bankwidth(map)-1);
1203 spin_lock(chip->mutex);
1205 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1207 if (!ret) {
1208 if (chip->state != FL_POINT && chip->state != FL_READY)
1209 map_write(map, CMD(0xff), cmd_addr);
1211 chip->state = FL_POINT;
1212 chip->ref_point_counter++;
1214 spin_unlock(chip->mutex);
1216 return ret;
1219 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1221 struct map_info *map = mtd->priv;
1222 struct cfi_private *cfi = map->fldrv_priv;
1223 unsigned long ofs, last_end = 0;
1224 int chipnum;
1225 int ret = 0;
1227 if (!map->virt || (from + len > mtd->size))
1228 return -EINVAL;
1230 /* Now lock the chip(s) to POINT state */
1232 /* ofs: offset within the first chip at which the first read should start */
1233 chipnum = (from >> cfi->chipshift);
1234 ofs = from - (chipnum << cfi->chipshift);
1236 *mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
1237 *retlen = 0;
1239 while (len) {
1240 unsigned long thislen;
1242 if (chipnum >= cfi->numchips)
1243 break;
1245 /* We cannot point across chips that are virtually disjoint */
1246 if (!last_end)
1247 last_end = cfi->chips[chipnum].start;
1248 else if (cfi->chips[chipnum].start != last_end)
1249 break;
1251 if ((len + ofs -1) >> cfi->chipshift)
1252 thislen = (1<<cfi->chipshift) - ofs;
1253 else
1254 thislen = len;
1256 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1257 if (ret)
1258 break;
1260 *retlen += thislen;
1261 len -= thislen;
1263 ofs = 0;
1264 last_end += 1 << cfi->chipshift;
1265 chipnum++;
1267 return 0;
1270 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1272 struct map_info *map = mtd->priv;
1273 struct cfi_private *cfi = map->fldrv_priv;
1274 unsigned long ofs;
1275 int chipnum;
1277 /* Now unlock the chip(s) POINT state */
1279 /* ofs: offset within the first chip at which the first read should start */
1280 chipnum = (from >> cfi->chipshift);
1281 ofs = from - (chipnum << cfi->chipshift);
1283 while (len) {
1284 unsigned long thislen;
1285 struct flchip *chip;
1287 chip = &cfi->chips[chipnum];
1288 if (chipnum >= cfi->numchips)
1289 break;
1291 if ((len + ofs -1) >> cfi->chipshift)
1292 thislen = (1<<cfi->chipshift) - ofs;
1293 else
1294 thislen = len;
1296 spin_lock(chip->mutex);
1297 if (chip->state == FL_POINT) {
1298 chip->ref_point_counter--;
1299 if(chip->ref_point_counter == 0)
1300 chip->state = FL_READY;
1301 } else
1302 printk(KERN_ERR "%s: Warning: unpoint called on a non-pointed region\n", map->name); /* Should this give an error? */
1304 put_chip(map, chip, chip->start);
1305 spin_unlock(chip->mutex);
1307 len -= thislen;
1308 ofs = 0;
1309 chipnum++;
1313 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1315 unsigned long cmd_addr;
1316 struct cfi_private *cfi = map->fldrv_priv;
1317 int ret;
1319 adr += chip->start;
1321 /* Ensure cmd read/writes are aligned. */
1322 cmd_addr = adr & ~(map_bankwidth(map)-1);
1324 spin_lock(chip->mutex);
1325 ret = get_chip(map, chip, cmd_addr, FL_READY);
1326 if (ret) {
1327 spin_unlock(chip->mutex);
1328 return ret;
1331 if (chip->state != FL_POINT && chip->state != FL_READY) {
1332 map_write(map, CMD(0xff), cmd_addr);
1334 chip->state = FL_READY;
1337 map_copy_from(map, buf, adr, len);
1339 put_chip(map, chip, cmd_addr);
1341 spin_unlock(chip->mutex);
1342 return 0;
1345 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1347 struct map_info *map = mtd->priv;
1348 struct cfi_private *cfi = map->fldrv_priv;
1349 unsigned long ofs;
1350 int chipnum;
1351 int ret = 0;
1353 /* ofs: offset within the first chip at which the first read should start */
1354 chipnum = (from >> cfi->chipshift);
1355 ofs = from - (chipnum << cfi->chipshift);
1357 *retlen = 0;
1359 while (len) {
1360 unsigned long thislen;
1362 if (chipnum >= cfi->numchips)
1363 break;
1365 if ((len + ofs -1) >> cfi->chipshift)
1366 thislen = (1<<cfi->chipshift) - ofs;
1367 else
1368 thislen = len;
1370 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1371 if (ret)
1372 break;
1374 *retlen += thislen;
1375 len -= thislen;
1376 buf += thislen;
1378 ofs = 0;
1379 chipnum++;
1381 return ret;
1384 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1385 unsigned long adr, map_word datum, int mode)
1387 struct cfi_private *cfi = map->fldrv_priv;
1388 map_word status, write_cmd;
1389 int ret=0;
1391 adr += chip->start;
1393 switch (mode) {
1394 case FL_WRITING:
1395 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1396 break;
1397 case FL_OTP_WRITE:
1398 write_cmd = CMD(0xc0);
1399 break;
1400 default:
1401 return -EINVAL;
1404 spin_lock(chip->mutex);
1405 ret = get_chip(map, chip, adr, mode);
1406 if (ret) {
1407 spin_unlock(chip->mutex);
1408 return ret;
1411 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1412 ENABLE_VPP(map);
1413 xip_disable(map, chip, adr);
1414 map_write(map, write_cmd, adr);
1415 map_write(map, datum, adr);
1416 chip->state = mode;
1418 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1419 adr, map_bankwidth(map),
1420 chip->word_write_time);
1421 if (ret) {
1422 xip_enable(map, chip, adr);
1423 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1424 goto out;
1427 /* check for errors */
1428 status = map_read(map, adr);
1429 if (map_word_bitsset(map, status, CMD(0x1a))) {
1430 unsigned long chipstatus = MERGESTATUS(status);
1432 /* reset status */
1433 map_write(map, CMD(0x50), adr);
1434 map_write(map, CMD(0x70), adr);
1435 xip_enable(map, chip, adr);
1437 if (chipstatus & 0x02) {
1438 ret = -EROFS;
1439 } else if (chipstatus & 0x08) {
1440 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1441 ret = -EIO;
1442 } else {
1443 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1444 ret = -EINVAL;
1447 goto out;
1450 xip_enable(map, chip, adr);
1451 out: put_chip(map, chip, adr);
1452 spin_unlock(chip->mutex);
1453 return ret;
1457 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1459 struct map_info *map = mtd->priv;
1460 struct cfi_private *cfi = map->fldrv_priv;
1461 int ret = 0;
1462 int chipnum;
1463 unsigned long ofs;
1465 *retlen = 0;
1466 if (!len)
1467 return 0;
1469 chipnum = to >> cfi->chipshift;
1470 ofs = to - (chipnum << cfi->chipshift);
1472 /* If it's not bus-aligned, do the first byte write */
1473 if (ofs & (map_bankwidth(map)-1)) {
1474 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1475 int gap = ofs - bus_ofs;
1476 int n;
1477 map_word datum;
1479 n = min_t(int, len, map_bankwidth(map)-gap);
1480 datum = map_word_ff(map);
1481 datum = map_word_load_partial(map, datum, buf, gap, n);
1483 ret = do_write_oneword(map, &cfi->chips[chipnum],
1484 bus_ofs, datum, FL_WRITING);
1485 if (ret)
1486 return ret;
1488 len -= n;
1489 ofs += n;
1490 buf += n;
1491 (*retlen) += n;
1493 if (ofs >> cfi->chipshift) {
1494 chipnum ++;
1495 ofs = 0;
1496 if (chipnum == cfi->numchips)
1497 return 0;
1501 while(len >= map_bankwidth(map)) {
1502 map_word datum = map_word_load(map, buf);
1504 ret = do_write_oneword(map, &cfi->chips[chipnum],
1505 ofs, datum, FL_WRITING);
1506 if (ret)
1507 return ret;
1509 ofs += map_bankwidth(map);
1510 buf += map_bankwidth(map);
1511 (*retlen) += map_bankwidth(map);
1512 len -= map_bankwidth(map);
1514 if (ofs >> cfi->chipshift) {
1515 chipnum ++;
1516 ofs = 0;
1517 if (chipnum == cfi->numchips)
1518 return 0;
1522 if (len & (map_bankwidth(map)-1)) {
1523 map_word datum;
1525 datum = map_word_ff(map);
1526 datum = map_word_load_partial(map, datum, buf, 0, len);
1528 ret = do_write_oneword(map, &cfi->chips[chipnum],
1529 ofs, datum, FL_WRITING);
1530 if (ret)
1531 return ret;
1533 (*retlen) += len;
1536 return 0;
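/*
 * Buffered write of up to one write buffer's worth of data: clear any
 * stale SR.4/SR.5 error bits, issue Write-to-Buffer (0xe8, or 0xe9 for
 * the 0x0200 command set), wait for the buffer to become available,
 * program the word count and the (padded) data, then confirm with 0xd0
 * and wait for completion before decoding the status register.
 */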
1540 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1541 unsigned long adr, const struct kvec **pvec,
1542 unsigned long *pvec_seek, int len)
1544 struct cfi_private *cfi = map->fldrv_priv;
1545 map_word status, write_cmd, datum;
1546 unsigned long cmd_adr;
1547 int ret, wbufsize, word_gap, words;
1548 const struct kvec *vec;
1549 unsigned long vec_seek;
1551 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1552 adr += chip->start;
1553 cmd_adr = adr & ~(wbufsize-1);
1555 /* Let's determine this according to the interleave only once */
1556 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1558 spin_lock(chip->mutex);
1559 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1560 if (ret) {
1561 spin_unlock(chip->mutex);
1562 return ret;
1565 XIP_INVAL_CACHED_RANGE(map, adr, len);
1566 ENABLE_VPP(map);
1567 xip_disable(map, chip, cmd_adr);
1569 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1570 [...], the device will not accept any more Write to Buffer commands".
1571 So we must check here and reset those bits if they're set. Otherwise
1572 we're just pissing in the wind */
1573 if (chip->state != FL_STATUS) {
1574 map_write(map, CMD(0x70), cmd_adr);
1575 chip->state = FL_STATUS;
1577 status = map_read(map, cmd_adr);
1578 if (map_word_bitsset(map, status, CMD(0x30))) {
1579 xip_enable(map, chip, cmd_adr);
1580 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1581 xip_disable(map, chip, cmd_adr);
1582 map_write(map, CMD(0x50), cmd_adr);
1583 map_write(map, CMD(0x70), cmd_adr);
1586 chip->state = FL_WRITING_TO_BUFFER;
1587 map_write(map, write_cmd, cmd_adr);
1588 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1589 if (ret) {
1590 /* Argh. Not ready for write to buffer */
1591 map_word Xstatus = map_read(map, cmd_adr);
1592 map_write(map, CMD(0x70), cmd_adr);
1593 chip->state = FL_STATUS;
1594 status = map_read(map, cmd_adr);
1595 map_write(map, CMD(0x50), cmd_adr);
1596 map_write(map, CMD(0x70), cmd_adr);
1597 xip_enable(map, chip, cmd_adr);
1598 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1599 map->name, Xstatus.x[0], status.x[0]);
1600 goto out;
1603 /* Figure out the number of words to write */
1604 word_gap = (-adr & (map_bankwidth(map)-1));
1605 words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1606 if (!word_gap) {
1607 words--;
1608 } else {
1609 word_gap = map_bankwidth(map) - word_gap;
1610 adr -= word_gap;
1611 datum = map_word_ff(map);
1614 /* Write length of data to come */
1615 map_write(map, CMD(words), cmd_adr );
1617 /* Write data */
1618 vec = *pvec;
1619 vec_seek = *pvec_seek;
1620 do {
1621 int n = map_bankwidth(map) - word_gap;
1622 if (n > vec->iov_len - vec_seek)
1623 n = vec->iov_len - vec_seek;
1624 if (n > len)
1625 n = len;
1627 if (!word_gap && len < map_bankwidth(map))
1628 datum = map_word_ff(map);
1630 datum = map_word_load_partial(map, datum,
1631 vec->iov_base + vec_seek,
1632 word_gap, n);
1634 len -= n;
1635 word_gap += n;
1636 if (!len || word_gap == map_bankwidth(map)) {
1637 map_write(map, datum, adr);
1638 adr += map_bankwidth(map);
1639 word_gap = 0;
1642 vec_seek += n;
1643 if (vec_seek == vec->iov_len) {
1644 vec++;
1645 vec_seek = 0;
1647 } while (len);
1648 *pvec = vec;
1649 *pvec_seek = vec_seek;
1651 /* GO GO GO */
1652 map_write(map, CMD(0xd0), cmd_adr);
1653 chip->state = FL_WRITING;
1655 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1656 adr, len,
1657 chip->buffer_write_time);
1658 if (ret) {
1659 map_write(map, CMD(0x70), cmd_adr);
1660 chip->state = FL_STATUS;
1661 xip_enable(map, chip, cmd_adr);
1662 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1663 goto out;
1666 /* check for errors */
1667 status = map_read(map, cmd_adr);
1668 if (map_word_bitsset(map, status, CMD(0x1a))) {
1669 unsigned long chipstatus = MERGESTATUS(status);
1671 /* reset status */
1672 map_write(map, CMD(0x50), cmd_adr);
1673 map_write(map, CMD(0x70), cmd_adr);
1674 xip_enable(map, chip, cmd_adr);
1676 if (chipstatus & 0x02) {
1677 ret = -EROFS;
1678 } else if (chipstatus & 0x08) {
1679 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1680 ret = -EIO;
1681 } else {
1682 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1683 ret = -EINVAL;
1686 goto out;
1689 xip_enable(map, chip, cmd_adr);
1690 out: put_chip(map, chip, cmd_adr);
1691 spin_unlock(chip->mutex);
1692 return ret;
1695 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1696 unsigned long count, loff_t to, size_t *retlen)
1698 struct map_info *map = mtd->priv;
1699 struct cfi_private *cfi = map->fldrv_priv;
1700 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1701 int ret = 0;
1702 int chipnum;
1703 unsigned long ofs, vec_seek, i;
1704 size_t len = 0;
1706 for (i = 0; i < count; i++)
1707 len += vecs[i].iov_len;
1709 *retlen = 0;
1710 if (!len)
1711 return 0;
1713 chipnum = to >> cfi->chipshift;
1714 ofs = to - (chipnum << cfi->chipshift);
1715 vec_seek = 0;
1717 do {
1718 /* We must not cross write block boundaries */
1719 int size = wbufsize - (ofs & (wbufsize-1));
1721 if (size > len)
1722 size = len;
1723 ret = do_write_buffer(map, &cfi->chips[chipnum],
1724 ofs, &vecs, &vec_seek, size);
1725 if (ret)
1726 return ret;
1728 ofs += size;
1729 (*retlen) += size;
1730 len -= size;
1732 if (ofs >> cfi->chipshift) {
1733 chipnum ++;
1734 ofs = 0;
1735 if (chipnum == cfi->numchips)
1736 return 0;
1739 /* Be nice and reschedule with the chip in a usable state for other
1740 processes. */
1741 cond_resched();
1743 } while (len);
1745 return 0;
1748 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1749 size_t len, size_t *retlen, const u_char *buf)
1751 struct kvec vec;
1753 vec.iov_base = (void *) buf;
1754 vec.iov_len = len;
1756 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
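/*
 * Erase a single block: clear the status register (0x50), issue the block
 * erase setup/confirm pair (0x20, 0xd0), wait for completion and decode
 * the error bits, retrying up to three times when the chip reports an
 * erase failure.
 */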
1759 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1760 unsigned long adr, int len, void *thunk)
1762 struct cfi_private *cfi = map->fldrv_priv;
1763 map_word status;
1764 int retries = 3;
1765 int ret;
1767 adr += chip->start;
1769 retry:
1770 spin_lock(chip->mutex);
1771 ret = get_chip(map, chip, adr, FL_ERASING);
1772 if (ret) {
1773 spin_unlock(chip->mutex);
1774 return ret;
1777 XIP_INVAL_CACHED_RANGE(map, adr, len);
1778 ENABLE_VPP(map);
1779 xip_disable(map, chip, adr);
1781 /* Clear the status register first */
1782 map_write(map, CMD(0x50), adr);
1784 /* Now erase */
1785 map_write(map, CMD(0x20), adr);
1786 map_write(map, CMD(0xD0), adr);
1787 chip->state = FL_ERASING;
1788 chip->erase_suspended = 0;
1790 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1791 adr, len,
1792 chip->erase_time);
1793 if (ret) {
1794 map_write(map, CMD(0x70), adr);
1795 chip->state = FL_STATUS;
1796 xip_enable(map, chip, adr);
1797 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1798 goto out;
1801 /* We've broken this before. It doesn't hurt to be safe */
1802 map_write(map, CMD(0x70), adr);
1803 chip->state = FL_STATUS;
1804 status = map_read(map, adr);
1806 /* check for errors */
1807 if (map_word_bitsset(map, status, CMD(0x3a))) {
1808 unsigned long chipstatus = MERGESTATUS(status);
1810 /* Reset the error bits */
1811 map_write(map, CMD(0x50), adr);
1812 map_write(map, CMD(0x70), adr);
1813 xip_enable(map, chip, adr);
1815 if ((chipstatus & 0x30) == 0x30) {
1816 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1817 ret = -EINVAL;
1818 } else if (chipstatus & 0x02) {
1819 /* Protection bit set */
1820 ret = -EROFS;
1821 } else if (chipstatus & 0x8) {
1822 /* Voltage */
1823 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1824 ret = -EIO;
1825 } else if (chipstatus & 0x20 && retries--) {
1826 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1827 put_chip(map, chip, adr);
1828 spin_unlock(chip->mutex);
1829 goto retry;
1830 } else {
1831 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1832 ret = -EIO;
1835 goto out;
1838 xip_enable(map, chip, adr);
1839 out: put_chip(map, chip, adr);
1840 spin_unlock(chip->mutex);
1841 return ret;
1844 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1846 unsigned long ofs, len;
1847 int ret;
1849 ofs = instr->addr;
1850 len = instr->len;
1852 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1853 if (ret)
1854 return ret;
1856 instr->state = MTD_ERASE_DONE;
1857 mtd_erase_callback(instr);
1859 return 0;
1862 static void cfi_intelext_sync (struct mtd_info *mtd)
1864 struct map_info *map = mtd->priv;
1865 struct cfi_private *cfi = map->fldrv_priv;
1866 int i;
1867 struct flchip *chip;
1868 int ret = 0;
1870 for (i=0; !ret && i<cfi->numchips; i++) {
1871 chip = &cfi->chips[i];
1873 spin_lock(chip->mutex);
1874 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1876 if (!ret) {
1877 chip->oldstate = chip->state;
1878 chip->state = FL_SYNCING;
1879 /* No need to wake_up() on this state change -
1880 * as the whole point is that nobody can do anything
1881 * with the chip now anyway.
1884 spin_unlock(chip->mutex);
1887 /* Unlock the chips again */
1889 for (i--; i >=0; i--) {
1890 chip = &cfi->chips[i];
1892 spin_lock(chip->mutex);
1894 if (chip->state == FL_SYNCING) {
1895 chip->state = chip->oldstate;
1896 chip->oldstate = FL_READY;
1897 wake_up(&chip->wq);
1898 }
1899 spin_unlock(chip->mutex);
1900 }
1901 }
1903 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1904 struct flchip *chip,
1905 unsigned long adr,
1906 int len, void *thunk)
1907 {
1908 struct cfi_private *cfi = map->fldrv_priv;
1909 int status, ofs_factor = cfi->interleave * cfi->device_type;
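/*
 * 0x90 puts the chip into read-identifier mode; the block lock status
 * is then readable at word offset 2 from the block base, which is why
 * the address is scaled by 2*ofs_factor (interleave * device width).
 */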
1911 adr += chip->start;
1912 xip_disable(map, chip, adr+(2*ofs_factor));
1913 map_write(map, CMD(0x90), adr+(2*ofs_factor));
1914 chip->state = FL_JEDEC_QUERY;
1915 status = cfi_read_query(map, adr+(2*ofs_factor));
1916 xip_enable(map, chip, 0);
1917 return status;
1918 }
1920 #ifdef DEBUG_LOCK_BITS
1921 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1922 struct flchip *chip,
1923 unsigned long adr,
1924 int len, void *thunk)
1925 {
1926 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1927 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1928 return 0;
1929 }
1930 #endif
1932 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1933 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
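/*
 * These sentinel values are passed through cfi_varsize_frob() as the
 * "thunk" argument so do_xxlock_oneblock() knows whether to set or
 * clear the lock bit for each block it visits.
 */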
1935 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1936 unsigned long adr, int len, void *thunk)
1937 {
1938 struct cfi_private *cfi = map->fldrv_priv;
1939 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1940 int udelay;
1941 int ret;
1943 adr += chip->start;
1945 spin_lock(chip->mutex);
1946 ret = get_chip(map, chip, adr, FL_LOCKING);
1947 if (ret) {
1948 spin_unlock(chip->mutex);
1949 return ret;
1950 }
1952 ENABLE_VPP(map);
1953 xip_disable(map, chip, adr);
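/*
 * Block lock command sequence: 0x60 (lock setup), followed by 0x01 to
 * set the block's lock bit or 0xD0 to clear the lock bits.
 */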
1955 map_write(map, CMD(0x60), adr);
1956 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1957 map_write(map, CMD(0x01), adr);
1958 chip->state = FL_LOCKING;
1959 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1960 map_write(map, CMD(0xD0), adr);
1961 chip->state = FL_UNLOCKING;
1962 } else
1963 BUG();
1965 /*
1966 * If Instant Individual Block Locking supported then no need
1967 * to delay.
1968 */
1969 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1971 ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1972 if (ret) {
1973 map_write(map, CMD(0x70), adr);
1974 chip->state = FL_STATUS;
1975 xip_enable(map, chip, adr);
1976 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1977 goto out;
1978 }
1980 xip_enable(map, chip, adr);
1981 out: put_chip(map, chip, adr);
1982 spin_unlock(chip->mutex);
1983 return ret;
1984 }
1986 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1987 {
1988 int ret;
1990 #ifdef DEBUG_LOCK_BITS
1991 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1992 __FUNCTION__, ofs, len);
1993 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1994 ofs, len, NULL);
1995 #endif
1997 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1998 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2000 #ifdef DEBUG_LOCK_BITS
2001 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2002 __FUNCTION__, ret);
2003 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2004 ofs, len, NULL);
2005 #endif
2007 return ret;
2008 }
2010 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2011 {
2012 int ret;
2014 #ifdef DEBUG_LOCK_BITS
2015 printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
2016 __FUNCTION__, ofs, len);
2017 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2018 ofs, len, NULL);
2019 #endif
2021 ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2022 ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2024 #ifdef DEBUG_LOCK_BITS
2025 printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2026 __FUNCTION__, ret);
2027 cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2028 ofs, len, NULL);
2029 #endif
2031 return ret;
2032 }
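/*
 * Illustrative caller flow only (the identifiers "mtd", "ofs" and "len"
 * belong to the caller, not this file): these handlers are wired into
 * mtd_info by cfi_intelext_setup(), and a user typically unlocks a
 * region before modifying it and may relock it afterwards, e.g.
 *
 *	mtd->unlock(mtd, ofs, len);
 *	... erase and/or write via mtd->erase() / mtd->write() ...
 *	mtd->lock(mtd, ofs, len);
 */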
2034 #ifdef CONFIG_MTD_OTP
2036 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2037 u_long data_offset, u_char *buf, u_int size,
2038 u_long prot_offset, u_int groupno, u_int groupsize);
2040 static int __xipram
2041 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2042 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2043 {
2044 struct cfi_private *cfi = map->fldrv_priv;
2045 int ret;
2047 spin_lock(chip->mutex);
2048 ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2049 if (ret) {
2050 spin_unlock(chip->mutex);
2051 return ret;
2052 }
2054 /* let's ensure we're not reading back cached data from array mode */
2055 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2057 xip_disable(map, chip, chip->start);
2058 if (chip->state != FL_JEDEC_QUERY) {
2059 map_write(map, CMD(0x90), chip->start);
2060 chip->state = FL_JEDEC_QUERY;
2061 }
2062 map_copy_from(map, buf, chip->start + offset, size);
2063 xip_enable(map, chip, chip->start);
2065 /* then ensure we don't keep OTP data in the cache */
2066 INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2068 put_chip(map, chip, chip->start);
2069 spin_unlock(chip->mutex);
2070 return 0;
2071 }
2073 static int
2074 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2075 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2076 {
2077 int ret;
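/*
 * OTP writes may start at any byte offset, but do_write_oneword()
 * programs whole bus-width words.  Each pass below therefore builds a
 * padded word: start from all-ones (programming a 1 bit is a no-op on
 * NOR flash), merge in the caller's bytes at the right offset, and
 * write the result at the aligned address.
 */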
2079 while (size) {
2080 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2081 int gap = offset - bus_ofs;
2082 int n = min_t(int, size, map_bankwidth(map)-gap);
2083 map_word datum = map_word_ff(map);
2085 datum = map_word_load_partial(map, datum, buf, gap, n);
2086 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2087 if (ret)
2088 return ret;
2090 offset += n;
2091 buf += n;
2092 size -= n;
2093 }
2095 return 0;
2096 }
2098 static int
2099 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2100 u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2101 {
2102 struct cfi_private *cfi = map->fldrv_priv;
2103 map_word datum;
2105 /* make sure area matches group boundaries */
2106 if (size != grpsz)
2107 return -EXDEV;
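/*
 * The protection lock bits are active low: start from an all-ones
 * word, clear the bit for this group, and program the result into the
 * protection-lock register at 'prot'.
 */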
2109 datum = map_word_ff(map);
2110 datum = map_word_clr(map, datum, CMD(1 << grpno));
2111 return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2112 }
2114 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2115 size_t *retlen, u_char *buf,
2116 otp_op_t action, int user_regs)
2117 {
2118 struct map_info *map = mtd->priv;
2119 struct cfi_private *cfi = map->fldrv_priv;
2120 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2121 struct flchip *chip;
2122 struct cfi_intelext_otpinfo *otp;
2123 u_long devsize, reg_prot_offset, data_offset;
2124 u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2125 u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2126 int ret;
2128 *retlen = 0;
2130 /* Check that we actually have some OTP registers */
2131 if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2132 return -ENODATA;
2134 /* we need real chips here not virtual ones */
2135 devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2136 chip_step = devsize >> cfi->chipshift;
2137 chip_num = 0;
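/*
 * One physical device covers (1 << DevSize) * interleave bytes of the
 * map; shifting that by cfi->chipshift gives the stride in
 * cfi->chips[] entries, so stepping by chip_step visits each real
 * chip once rather than every virtual one.
 */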
2139 /* Some chips have OTP located in the _top_ partition only.
2140 For example: Intel 28F256L18T (T means top-parameter device) */
2141 if (cfi->mfr == MANUFACTURER_INTEL) {
2142 switch (cfi->id) {
2143 case 0x880b:
2144 case 0x880c:
2145 case 0x880d:
2146 chip_num = chip_step - 1;
2147 }
2148 }
2150 for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2151 chip = &cfi->chips[chip_num];
2152 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2154 /* first OTP region */
2155 field = 0;
2156 reg_prot_offset = extp->ProtRegAddr;
2157 reg_fact_groups = 1;
2158 reg_fact_size = 1 << extp->FactProtRegSize;
2159 reg_user_groups = 1;
2160 reg_user_size = 1 << extp->UserProtRegSize;
2162 while (len > 0) {
2163 /* flash geometry fixup */
2164 data_offset = reg_prot_offset + 1;
2165 data_offset *= cfi->interleave * cfi->device_type;
2166 reg_prot_offset *= cfi->interleave * cfi->device_type;
2167 reg_fact_size *= cfi->interleave;
2168 reg_user_size *= cfi->interleave;
2170 if (user_regs) {
2171 groups = reg_user_groups;
2172 groupsize = reg_user_size;
2173 /* skip over factory reg area */
2174 groupno = reg_fact_groups;
2175 data_offset += reg_fact_groups * reg_fact_size;
2176 } else {
2177 groups = reg_fact_groups;
2178 groupsize = reg_fact_size;
2179 groupno = 0;
2180 }
2182 while (len > 0 && groups > 0) {
2183 if (!action) {
2184 /*
2185 * Special case: if action is NULL
2186 * we fill buf with otp_info records.
2187 */
2188 struct otp_info *otpinfo;
2189 map_word lockword;
2190 len -= sizeof(struct otp_info);
2191 if (len <= 0)
2192 return -ENOSPC;
2193 ret = do_otp_read(map, chip,
2194 reg_prot_offset,
2195 (u_char *)&lockword,
2196 map_bankwidth(map),
2197 0, 0, 0);
2198 if (ret)
2199 return ret;
2200 otpinfo = (struct otp_info *)buf;
2201 otpinfo->start = from;
2202 otpinfo->length = groupsize;
2203 otpinfo->locked =
2204 !map_word_bitsset(map, lockword,
2205 CMD(1 << groupno));
2206 from += groupsize;
2207 buf += sizeof(*otpinfo);
2208 *retlen += sizeof(*otpinfo);
2209 } else if (from >= groupsize) {
2210 from -= groupsize;
2211 data_offset += groupsize;
2212 } else {
2213 int size = groupsize;
2214 data_offset += from;
2215 size -= from;
2216 from = 0;
2217 if (size > len)
2218 size = len;
2219 ret = action(map, chip, data_offset,
2220 buf, size, reg_prot_offset,
2221 groupno, groupsize);
2222 if (ret < 0)
2223 return ret;
2224 buf += size;
2225 len -= size;
2226 *retlen += size;
2227 data_offset += size;
2228 }
2229 groupno++;
2230 groups--;
2231 }
2233 /* next OTP region */
2234 if (++field == extp->NumProtectionFields)
2235 break;
2236 reg_prot_offset = otp->ProtRegAddr;
2237 reg_fact_groups = otp->FactGroups;
2238 reg_fact_size = 1 << otp->FactProtRegSize;
2239 reg_user_groups = otp->UserGroups;
2240 reg_user_size = 1 << otp->UserProtRegSize;
2241 otp++;
2242 }
2243 }
2245 return 0;
2246 }
2248 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2249 size_t len, size_t *retlen,
2250 u_char *buf)
2251 {
2252 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2253 buf, do_otp_read, 0);
2254 }
2256 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2257 size_t len, size_t *retlen,
2258 u_char *buf)
2259 {
2260 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2261 buf, do_otp_read, 1);
2262 }
2264 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2265 size_t len, size_t *retlen,
2266 u_char *buf)
2267 {
2268 return cfi_intelext_otp_walk(mtd, from, len, retlen,
2269 buf, do_otp_write, 1);
2270 }
2272 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2273 loff_t from, size_t len)
2274 {
2275 size_t retlen;
2276 return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2277 NULL, do_otp_lock, 1);
2278 }
2280 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2281 struct otp_info *buf, size_t len)
2282 {
2283 size_t retlen;
2284 int ret;
2286 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2287 return ret ? : retlen;
2288 }
2290 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2291 struct otp_info *buf, size_t len)
2292 {
2293 size_t retlen;
2294 int ret;
2296 ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2297 return ret ? : retlen;
2298 }
2300 #endif
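/*
 * For parts that power up with their blocks locked, the per-region
 * lockmap bitmaps record each block's lock state before a suspend so
 * that only the blocks which were unlocked get re-unlocked on resume
 * (see cfi_intelext_restore_locks() below).
 */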
2302 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2303 {
2304 struct mtd_erase_region_info *region;
2305 int block, status, i;
2306 unsigned long adr;
2307 size_t len;
2309 for (i = 0; i < mtd->numeraseregions; i++) {
2310 region = &mtd->eraseregions[i];
2311 if (!region->lockmap)
2312 continue;
2314 for (block = 0; block < region->numblocks; block++){
2315 len = region->erasesize;
2316 adr = region->offset + block * len;
2318 status = cfi_varsize_frob(mtd,
2319 do_getlockstatus_oneblock, adr, len, NULL);
2320 if (status)
2321 set_bit(block, region->lockmap);
2322 else
2323 clear_bit(block, region->lockmap);
2324 }
2325 }
2326 }
2328 static int cfi_intelext_suspend(struct mtd_info *mtd)
2329 {
2330 struct map_info *map = mtd->priv;
2331 struct cfi_private *cfi = map->fldrv_priv;
2332 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2333 int i;
2334 struct flchip *chip;
2335 int ret = 0;
2337 if ((mtd->flags & MTD_STUPID_LOCK)
2338 && extp && (extp->FeatureSupport & (1 << 5)))
2339 cfi_intelext_save_locks(mtd);
2341 for (i=0; !ret && i<cfi->numchips; i++) {
2342 chip = &cfi->chips[i];
2344 spin_lock(chip->mutex);
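/*
 * Only chips sitting in an idle state (array read, status or query
 * mode) can be put to sleep here; anything with an operation in
 * flight makes us bail out with -EAGAIN, and the loop further down
 * then undoes the chips already marked FL_PM_SUSPENDED.
 */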
2346 switch (chip->state) {
2347 case FL_READY:
2348 case FL_STATUS:
2349 case FL_CFI_QUERY:
2350 case FL_JEDEC_QUERY:
2351 if (chip->oldstate == FL_READY) {
2352 /* place the chip in a known state before suspend */
2353 map_write(map, CMD(0xFF), cfi->chips[i].start);
2354 chip->oldstate = chip->state;
2355 chip->state = FL_PM_SUSPENDED;
2356 /* No need to wake_up() on this state change -
2357 * as the whole point is that nobody can do anything
2358 * with the chip now anyway.
2359 */
2360 } else {
2361 /* There seems to be an operation pending. We must wait for it. */
2362 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2363 ret = -EAGAIN;
2364 }
2365 break;
2366 default:
2367 /* Should we actually wait? Once upon a time these routines weren't
2368 allowed to. Or should we return -EAGAIN, because the upper layers
2369 ought to have already shut down anything which was using the device
2370 anyway? The latter for now. */
2371 printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2372 ret = -EAGAIN;
2373 case FL_PM_SUSPENDED:
2374 break;
2375 }
2376 spin_unlock(chip->mutex);
2377 }
2379 /* Unlock the chips again */
2381 if (ret) {
2382 for (i--; i >=0; i--) {
2383 chip = &cfi->chips[i];
2385 spin_lock(chip->mutex);
2387 if (chip->state == FL_PM_SUSPENDED) {
2388 /* No need to force it into a known state here,
2389 because we're returning failure, and it didn't
2390 get power cycled */
2391 chip->state = chip->oldstate;
2392 chip->oldstate = FL_READY;
2393 wake_up(&chip->wq);
2394 }
2395 spin_unlock(chip->mutex);
2396 }
2397 }
2399 return ret;
2400 }
2402 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2403 {
2404 struct mtd_erase_region_info *region;
2405 int block, i;
2406 unsigned long adr;
2407 size_t len;
2409 for (i = 0; i < mtd->numeraseregions; i++) {
2410 region = &mtd->eraseregions[i];
2411 if (!region->lockmap)
2412 continue;
2414 for (block = 0; block < region->numblocks; block++) {
2415 len = region->erasesize;
2416 adr = region->offset + block * len;
2418 if (!test_bit(block, region->lockmap))
2419 cfi_intelext_unlock(mtd, adr, len);
2420 }
2421 }
2422 }
2424 static void cfi_intelext_resume(struct mtd_info *mtd)
2425 {
2426 struct map_info *map = mtd->priv;
2427 struct cfi_private *cfi = map->fldrv_priv;
2428 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2429 int i;
2430 struct flchip *chip;
2432 for (i=0; i<cfi->numchips; i++) {
2434 chip = &cfi->chips[i];
2436 spin_lock(chip->mutex);
2438 /* Go to known state. Chip may have been power cycled */
2439 if (chip->state == FL_PM_SUSPENDED) {
2440 map_write(map, CMD(0xFF), cfi->chips[i].start);
2441 chip->oldstate = chip->state = FL_READY;
2442 wake_up(&chip->wq);
2443 }
2445 spin_unlock(chip->mutex);
2446 }
2448 if ((mtd->flags & MTD_STUPID_LOCK)
2449 && extp && (extp->FeatureSupport & (1 << 5)))
2450 cfi_intelext_restore_locks(mtd);
2451 }
2453 static int cfi_intelext_reset(struct mtd_info *mtd)
2454 {
2455 struct map_info *map = mtd->priv;
2456 struct cfi_private *cfi = map->fldrv_priv;
2457 int i, ret;
2459 for (i=0; i < cfi->numchips; i++) {
2460 struct flchip *chip = &cfi->chips[i];
2462 /* force the completion of any ongoing operation
2463 and switch to array mode so any bootloader in
2464 flash is accessible for soft reboot. */
2465 spin_lock(chip->mutex);
2466 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2467 if (!ret) {
2468 map_write(map, CMD(0xff), chip->start);
2469 chip->state = FL_SHUTDOWN;
2470 }
2471 spin_unlock(chip->mutex);
2472 }
2474 return 0;
2475 }
2477 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2478 void *v)
2479 {
2480 struct mtd_info *mtd;
2482 mtd = container_of(nb, struct mtd_info, reboot_notifier);
2483 cfi_intelext_reset(mtd);
2484 return NOTIFY_DONE;
2485 }
2487 static void cfi_intelext_destroy(struct mtd_info *mtd)
2488 {
2489 struct map_info *map = mtd->priv;
2490 struct cfi_private *cfi = map->fldrv_priv;
2491 struct mtd_erase_region_info *region;
2492 int i;
2493 cfi_intelext_reset(mtd);
2494 unregister_reboot_notifier(&mtd->reboot_notifier);
2495 kfree(cfi->cmdset_priv);
2496 kfree(cfi->cfiq);
2497 kfree(cfi->chips[0].priv);
2498 kfree(cfi);
2499 for (i = 0; i < mtd->numeraseregions; i++) {
2500 region = &mtd->eraseregions[i];
2501 if (region->lockmap)
2502 kfree(region->lockmap);
2503 }
2504 kfree(mtd->eraseregions);
2505 }
2507 MODULE_LICENSE("GPL");
2508 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2509 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2510 MODULE_ALIAS("cfi_cmdset_0003");
2511 MODULE_ALIAS("cfi_cmdset_0200");