/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 * 	- auto unlock sectors on resume for auto locking flash on power up
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);

static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	                    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}
static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}
/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
{
	printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
	mtd->flags |= MTD_STUPID_LOCK;
}
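/*
 * Note on the tables below: cfi_fixup() matches each entry against the
 * probed manufacturer/device IDs, with CFI_MFR_ANY/CFI_ID_ANY entries
 * applying to every chip.  Each table is scanned in order and is
 * terminated by a NULL entry.
 */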
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * as well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
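/*
 * Map drivers normally reach this code indirectly, via do_map_probe():
 * chips advertising primary command set 0001 (or 0003/0200 through the
 * aliases above) end up being driven by cfi_cmdset_0001().
 */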
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* Fall through */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can access the chip anymore */
		return -EIO;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		return -EAGAIN;
	}
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;

 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
			   || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			spin_lock(chip->mutex);

			if (ret == -EAGAIN) {
				spin_unlock(contender->mutex);
				goto retry;
			}
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
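/*
 * Rough sketch of how the helpers above are driven by the actual
 * operations below (do_write_oneword(), do_erase_oneblock(), ...):
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (!ret) {
 *		... issue command and wait for completion ...
 *		put_chip(map, chip, adr);
 *	}
 *	spin_unlock(chip->mutex);
 */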
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */
static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
	xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
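/*
 * Without CONFIG_MTD_XIP the xip_*() hooks above compile away to nothing
 * and INVAL_CACHE_AND_WAIT resolves to the sleeping implementation below.
 */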
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
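/*
 * WAIT_TIMEOUT passes a zero-length invalidation range, so only the status
 * poll runs; it is used where no array data changes (buffer-ready waits and
 * lock/unlock operations below).
 */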
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret = 0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
				map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
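/*
 * The thunk is passed through cfi_varsize_frob() down to
 * do_xxlock_oneblock(), which uses it to pick the lock or unlock command.
 */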
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);
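/*
 * Every OTP primitive below shares this signature so that
 * cfi_intelext_otp_walk() can drive reads, writes and lock operations
 * through one region-walking loop.
 */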
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}
static int __xipram
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}
static int __xipram
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
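
/*
 * Worked example for the "flash geometry fixup" above (illustrative
 * values, not from the original source; the CFI fields count in device
 * words): on a single x16 chip (interleave == 1, device_type == 2) a
 * ProtRegAddr of 0x80 yields reg_prot_offset = 0x80 * 1 * 2 = 0x100
 * bytes, and the OTP data area starts one device word later, at
 * (0x80 + 1) * 1 * 2 = 0x102 bytes from the chip base.
 */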
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}
static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}
static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}
static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;

	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}
static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}
static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif /* CONFIG_MTD_OTP */
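
#if 0
/*
 * Hypothetical caller sketch, not part of the driver: once the setup
 * code has hooked the handlers above into mtd_info, user OTP data is
 * reached through the mtd layer roughly like this.  The buffer size and
 * offset are made-up values.
 */
static int example_dump_user_otp(struct mtd_info *mtd)
{
	u_char buf[64];
	size_t retlen = 0;
	int ret;

	ret = mtd->read_user_prot_reg(mtd, 0, sizeof(buf), &retlen, buf);
	if (ret < 0)
		return ret;
	/* retlen now holds the number of OTP bytes actually copied */
	return retlen;
}
#endif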
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway. */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
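
/*
 * Summary of the suspend state machine above: an idle chip (oldstate
 * FL_READY) is put back into array mode with CMD(0xFF) and parked in
 * FL_PM_SUSPENDED; a chip with a pending or active operation fails the
 * whole suspend with -EAGAIN, and the unwind loop at the end then
 * restores and wakes every chip that had already been parked.
 */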
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
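
/*
 * cfi_intelext_save_locks() and cfi_intelext_restore_locks() work as a
 * pair for parts that power up with all blocks locked (MTD_STUPID_LOCK
 * plus FeatureSupport bit 5, the instant individual block locking
 * feature): the per-region 'lockmap' bitmaps record each block's lock
 * state before suspend, and on resume every block whose bit is clear is
 * explicitly unlocked again.
 */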
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
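
/*
 * For reference: this notifier is registered elsewhere in this driver
 * (cfi_intelext_setup() sets mtd->reboot_notifier.notifier_call to
 * cfi_intelext_reboot and calls register_reboot_notifier()), and it is
 * unregistered in cfi_intelext_destroy() below, so the chips are back in
 * array mode before a soft reboot jumps into any bootloader stored in
 * flash.
 */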
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (region->lockmap)
			kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");