/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 * 	- auto unlock sectors on resume for auto locking flash on power up
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
/*
 * *********** SETUP AND PROBE BITS ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}
	printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}
/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
{
	printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
	mtd->flags |= MTD_STUPID_LOCK;
}
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,  fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * common as well.  This table picks all the cases where
	 * we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
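/*
 * A fixup is simply a (manufacturer, device) match plus a callback, as the
 * tables above show.  A hypothetical sketch of wiring up a new quirk (the
 * device id and behaviour below are made up for illustration only):
 */
#if 0
static void fixup_example_quirk(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* e.g. ignore a bogus buffer-write timeout reported by the chip */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
}

/* ...and an entry added before the terminating { 0, 0, NULL, NULL }:
 *	{ CFI_MFR_ANY, 0x1234, fixup_example_quirk, NULL },
 */
#endif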
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
		printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size-1];

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
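/*
 * Usage sketch (not part of this driver): a board map driver reaches
 * cfi_cmdset_0001() indirectly, by registering a map and letting the CFI
 * probe dispatch on the Primary Vendor Command Set ID (0x0001, 0x0003 and
 * 0x0200 all land here via the aliases above).  All names and numbers
 * below are assumptions for illustration only.
 */
#if 0
static struct map_info example_map = {
	.name      = "example-nor",	/* assumed */
	.size      = 0x800000,		/* 8 MiB, assumed */
	.bankwidth = 2,			/* 16-bit bus, assumed */
	.phys      = 0x00000000,	/* board specific */
};

static int __init example_nor_init(void)
{
	struct mtd_info *mtd;

	example_map.virt = ioremap(example_map.phys, example_map.size);
	if (!example_map.virt)
		return -EIO;
	simple_map_init(&example_map);

	/* On an Intel/Sharp command-set chip this ends up in cfi_cmdset_0001() */
	mtd = do_map_probe("cfi_probe", &example_map);
	if (!mtd) {
		iounmap(example_map.virt);
		return -ENXIO;
	}
	return add_mtd_device(mtd);
}
#endif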
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd) {
		kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
		       "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
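/*
 * Worked example of the partition arithmetic above (numbers assumed for
 * illustration): a 64 MiB chip gives cfi->chipshift == 26.  If the
 * extended query reports 4 identical hardware partitions, __ffs(4) == 2,
 * so partshift = 26 - 2 = 24 and every virtual chip spans 1 << 24 bytes
 * (16 MiB), with numvirtchips = numchips * 4.
 */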
/*
 * *********** CHIP ACCESS FUNCTIONS ***********
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
			spin_unlock(contender->mutex);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
				       map->name, status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
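/*
 * Every chip access routine below brackets its work with the same
 * get_chip()/put_chip() contract.  A sketch of that pattern (illustration
 * only, error handling trimmed):
 */
#if 0
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING); /* or FL_READY, FL_ERASING, ... */
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}
	/* ...issue commands, poll status... */
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
#endif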
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remaining of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time * 8;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
	xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time;

	spin_unlock(chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	spin_lock(chip->mutex);

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	sleep_time = chip_op_time / 2;

	for (;;) {
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		spin_unlock(chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		spin_lock(chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
		}
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*mtdbuf = (void *)map->virt + cfi->chips[chipnum].start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return ret;
}
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
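/*
 * Usage sketch: an MTD client uses point/unpoint for zero-copy reads on a
 * linear map.  'example_mtd' and 'dest' are assumed; illustration only.
 */
#if 0
	size_t retlen;
	u_char *virt;

	if (example_mtd->point &&
	    !example_mtd->point(example_mtd, from, len, &retlen, &virt)) {
		memcpy(dest, virt, retlen);	/* flash stays in array mode */
		example_mtd->unpoint(example_mtd, virt, from, retlen);
	}
#endif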
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret = 0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   adr, len,
				   chip->buffer_write_time);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
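/*
 * Usage sketch: how an MTD client lands in the buffered write path.  With
 * a non-zero BufWriteTimeoutTyp, fixup_use_write_buffers() has pointed
 * mtd->write at cfi_intelext_write_buffers().  'example_mtd' is assumed;
 * illustration only.
 */
#if 0
	size_t written;
	u_char data[64];	/* payload */

	int err = example_mtd->write(example_mtd, 0x1000, sizeof(data),
				     &written, data);
#endif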
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
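/*
 * Usage sketch: on parts that power up locked (cf. fixup_use_powerup_lock
 * above), a client must unlock a block before erasing or writing it.
 * 'example_mtd' is assumed; illustration only.
 */
#if 0
	example_mtd->unlock(example_mtd, 0x20000, example_mtd->erasesize);
	/* ...erase and/or program the block... */
	example_mtd->lock(example_mtd, 0x20000, example_mtd->erasesize);
#endif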
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* len is unsigned: check before
					   subtracting to avoid wraparound */
					if (len <= sizeof(struct otp_info))
						return -ENOSPC;
					len -= sizeof(struct otp_info);
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
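
/*
 * Worked example of the geometry fixup above, with illustrative
 * numbers: a single x16 chip in 16-bit mode has interleave 1 and
 * device_type 2, so a CFI ProtRegAddr of 0x80 (a device word address)
 * puts the lock word at map offset 0x100 and the register data at
 * 0x102, while the group sizes scale with the interleave alone.
 */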

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;

	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}
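
/*
 * The handlers above are wired into the corresponding mtd_info method
 * pointers by cfi_intelext_setup(), so an MTD user can, for example
 * (illustrative sketch only), read a factory-programmed serial number
 * with:
 *
 *	u_char serial[8];
 *	size_t retlen;
 *	int err = mtd->read_fact_prot_reg(mtd, 0, sizeof(serial),
 *					  &retlen, serial);
 */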

#endif

static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
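
/*
 * cfi_intelext_save_locks() keeps one bit per erase block in
 * region->lockmap: a set bit means the block was locked when the
 * device was suspended.  cfi_intelext_restore_locks() consults the
 * same bitmap on resume and unlocks only the blocks that were unlocked
 * before, which matters for parts flagged MTD_STUPID_LOCK that power
 * up with every block locked.
 */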

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_STUPID_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}
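
/*
 * cfi_intelext_reset() is invoked from the reboot notifier below and
 * from cfi_intelext_destroy().  Once a chip has been put into
 * FL_SHUTDOWN, get_chip() is expected to turn away further operations,
 * leaving the array readable for a soft reboot.
 */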

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (region->lockmap)
			kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");