2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
8 * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
9 * - completely revamped method functions so they are aware and
10 * independent of the flash geometry (buswidth, interleave, etc.)
11 * - scalability vs code size is completely set at compile-time
12 * (see include/linux/mtd/cfi.h for selection)
13 * - optimized write buffer method
14 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
15 * - reworked lock/unlock/erase support for var size flash
16 * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
17 * - auto unlock sectors on resume for auto locking flash on power up
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/reboot.h>
33 #include <linux/bitmap.h>
34 #include <linux/mtd/xip.h>
35 #include <linux/mtd/map.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/compatmac.h>
38 #include <linux/mtd/cfi.h>
40 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
41 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
43 // debugging, turns off buffer write mode if set to 1
44 #define FORCE_WORD_WRITE 0
46 #define MANUFACTURER_INTEL 0x0089
47 #define I82802AB 0x00ad
48 #define I82802AC 0x00ac
49 #define PF38F4476 0x881c
50 #define MANUFACTURER_ST 0x0020
51 #define M50LPW080 0x002F
52 #define M50FLW080A 0x0080
53 #define M50FLW080B 0x0081
54 #define AT49BV640D 0x02de
56 static int cfi_intelext_read (struct mtd_info
*, loff_t
, size_t, size_t *, u_char
*);
57 static int cfi_intelext_write_words(struct mtd_info
*, loff_t
, size_t, size_t *, const u_char
*);
58 static int cfi_intelext_write_buffers(struct mtd_info
*, loff_t
, size_t, size_t *, const u_char
*);
59 static int cfi_intelext_writev(struct mtd_info
*, const struct kvec
*, unsigned long, loff_t
, size_t *);
60 static int cfi_intelext_erase_varsize(struct mtd_info
*, struct erase_info
*);
61 static void cfi_intelext_sync (struct mtd_info
*);
62 static int cfi_intelext_lock(struct mtd_info
*mtd
, loff_t ofs
, uint64_t len
);
63 static int cfi_intelext_unlock(struct mtd_info
*mtd
, loff_t ofs
, uint64_t len
);
65 static int cfi_intelext_read_fact_prot_reg (struct mtd_info
*, loff_t
, size_t, size_t *, u_char
*);
66 static int cfi_intelext_read_user_prot_reg (struct mtd_info
*, loff_t
, size_t, size_t *, u_char
*);
67 static int cfi_intelext_write_user_prot_reg (struct mtd_info
*, loff_t
, size_t, size_t *, u_char
*);
68 static int cfi_intelext_lock_user_prot_reg (struct mtd_info
*, loff_t
, size_t);
69 static int cfi_intelext_get_fact_prot_info (struct mtd_info
*,
70 struct otp_info
*, size_t);
71 static int cfi_intelext_get_user_prot_info (struct mtd_info
*,
72 struct otp_info
*, size_t);
74 static int cfi_intelext_suspend (struct mtd_info
*);
75 static void cfi_intelext_resume (struct mtd_info
*);
76 static int cfi_intelext_reboot (struct notifier_block
*, unsigned long, void *);
78 static void cfi_intelext_destroy(struct mtd_info
*);
80 struct mtd_info
*cfi_cmdset_0001(struct map_info
*, int);
82 static struct mtd_info
*cfi_intelext_setup (struct mtd_info
*);
83 static int cfi_intelext_partition_fixup(struct mtd_info
*, struct cfi_private
**);
85 static int cfi_intelext_point (struct mtd_info
*mtd
, loff_t from
, size_t len
,
86 size_t *retlen
, void **virt
, resource_size_t
*phys
);
87 static void cfi_intelext_unpoint(struct mtd_info
*mtd
, loff_t from
, size_t len
);
89 static int chip_ready (struct map_info
*map
, struct flchip
*chip
, unsigned long adr
, int mode
);
90 static int get_chip(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
, int mode
);
91 static void put_chip(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
);
97 * *********** SETUP AND PROBE BITS ***********
100 static struct mtd_chip_driver cfi_intelext_chipdrv
= {
101 .probe
= NULL
, /* Not usable directly */
102 .destroy
= cfi_intelext_destroy
,
103 .name
= "cfi_cmdset_0001",
104 .module
= THIS_MODULE
107 /* #define DEBUG_LOCK_BITS */
108 /* #define DEBUG_CFI_FEATURES */
110 #ifdef DEBUG_CFI_FEATURES
111 static void cfi_tell_features(struct cfi_pri_intelext
*extp
)
114 printk(" Extended Query version %c.%c\n", extp
->MajorVersion
, extp
->MinorVersion
);
115 printk(" Feature/Command Support: %4.4X\n", extp
->FeatureSupport
);
116 printk(" - Chip Erase: %s\n", extp
->FeatureSupport
&1?"supported":"unsupported");
117 printk(" - Suspend Erase: %s\n", extp
->FeatureSupport
&2?"supported":"unsupported");
118 printk(" - Suspend Program: %s\n", extp
->FeatureSupport
&4?"supported":"unsupported");
119 printk(" - Legacy Lock/Unlock: %s\n", extp
->FeatureSupport
&8?"supported":"unsupported");
120 printk(" - Queued Erase: %s\n", extp
->FeatureSupport
&16?"supported":"unsupported");
121 printk(" - Instant block lock: %s\n", extp
->FeatureSupport
&32?"supported":"unsupported");
122 printk(" - Protection Bits: %s\n", extp
->FeatureSupport
&64?"supported":"unsupported");
123 printk(" - Page-mode read: %s\n", extp
->FeatureSupport
&128?"supported":"unsupported");
124 printk(" - Synchronous read: %s\n", extp
->FeatureSupport
&256?"supported":"unsupported");
125 printk(" - Simultaneous operations: %s\n", extp
->FeatureSupport
&512?"supported":"unsupported");
126 printk(" - Extended Flash Array: %s\n", extp
->FeatureSupport
&1024?"supported":"unsupported");
127 for (i
=11; i
<32; i
++) {
128 if (extp
->FeatureSupport
& (1<<i
))
129 printk(" - Unknown Bit %X: supported\n", i
);
132 printk(" Supported functions after Suspend: %2.2X\n", extp
->SuspendCmdSupport
);
133 printk(" - Program after Erase Suspend: %s\n", extp
->SuspendCmdSupport
&1?"supported":"unsupported");
134 for (i
=1; i
<8; i
++) {
135 if (extp
->SuspendCmdSupport
& (1<<i
))
136 printk(" - Unknown Bit %X: supported\n", i
);
139 printk(" Block Status Register Mask: %4.4X\n", extp
->BlkStatusRegMask
);
140 printk(" - Lock Bit Active: %s\n", extp
->BlkStatusRegMask
&1?"yes":"no");
141 printk(" - Lock-Down Bit Active: %s\n", extp
->BlkStatusRegMask
&2?"yes":"no");
142 for (i
=2; i
<3; i
++) {
143 if (extp
->BlkStatusRegMask
& (1<<i
))
144 printk(" - Unknown Bit %X Active: yes\n",i
);
146 printk(" - EFA Lock Bit: %s\n", extp
->BlkStatusRegMask
&16?"yes":"no");
147 printk(" - EFA Lock-Down Bit: %s\n", extp
->BlkStatusRegMask
&32?"yes":"no");
148 for (i
=6; i
<16; i
++) {
149 if (extp
->BlkStatusRegMask
& (1<<i
))
150 printk(" - Unknown Bit %X Active: yes\n",i
);
153 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
154 extp
->VccOptimal
>> 4, extp
->VccOptimal
& 0xf);
155 if (extp
->VppOptimal
)
156 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
157 extp
->VppOptimal
>> 4, extp
->VppOptimal
& 0xf);
161 /* Atmel chips don't use the same PRI format as Intel chips */
162 static void fixup_convert_atmel_pri(struct mtd_info
*mtd
, void *param
)
164 struct map_info
*map
= mtd
->priv
;
165 struct cfi_private
*cfi
= map
->fldrv_priv
;
166 struct cfi_pri_intelext
*extp
= cfi
->cmdset_priv
;
167 struct cfi_pri_atmel atmel_pri
;
168 uint32_t features
= 0;
170 /* Reverse byteswapping */
171 extp
->FeatureSupport
= cpu_to_le32(extp
->FeatureSupport
);
172 extp
->BlkStatusRegMask
= cpu_to_le16(extp
->BlkStatusRegMask
);
173 extp
->ProtRegAddr
= cpu_to_le16(extp
->ProtRegAddr
);
175 memcpy(&atmel_pri
, extp
, sizeof(atmel_pri
));
176 memset((char *)extp
+ 5, 0, sizeof(*extp
) - 5);
178 printk(KERN_ERR
"atmel Features: %02x\n", atmel_pri
.Features
);
180 if (atmel_pri
.Features
& 0x01) /* chip erase supported */
182 if (atmel_pri
.Features
& 0x02) /* erase suspend supported */
184 if (atmel_pri
.Features
& 0x04) /* program suspend supported */
186 if (atmel_pri
.Features
& 0x08) /* simultaneous operations supported */
188 if (atmel_pri
.Features
& 0x20) /* page mode read supported */
190 if (atmel_pri
.Features
& 0x40) /* queued erase supported */
192 if (atmel_pri
.Features
& 0x80) /* Protection bits supported */
195 extp
->FeatureSupport
= features
;
197 /* burst write mode not supported */
198 cfi
->cfiq
->BufWriteTimeoutTyp
= 0;
199 cfi
->cfiq
->BufWriteTimeoutMax
= 0;
202 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
203 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
204 static void fixup_intel_strataflash(struct mtd_info
*mtd
, void* param
)
206 struct map_info
*map
= mtd
->priv
;
207 struct cfi_private
*cfi
= map
->fldrv_priv
;
208 struct cfi_pri_intelext
*extp
= cfi
->cmdset_priv
;
210 printk(KERN_WARNING
"cfi_cmdset_0001: Suspend "
211 "erase on write disabled.\n");
212 extp
->SuspendCmdSupport
&= ~1;
216 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
217 static void fixup_no_write_suspend(struct mtd_info
*mtd
, void* param
)
219 struct map_info
*map
= mtd
->priv
;
220 struct cfi_private
*cfi
= map
->fldrv_priv
;
221 struct cfi_pri_intelext
*cfip
= cfi
->cmdset_priv
;
223 if (cfip
&& (cfip
->FeatureSupport
&4)) {
224 cfip
->FeatureSupport
&= ~4;
225 printk(KERN_WARNING
"cfi_cmdset_0001: write suspend disabled\n");
230 static void fixup_st_m28w320ct(struct mtd_info
*mtd
, void* param
)
232 struct map_info
*map
= mtd
->priv
;
233 struct cfi_private
*cfi
= map
->fldrv_priv
;
235 cfi
->cfiq
->BufWriteTimeoutTyp
= 0; /* Not supported */
236 cfi
->cfiq
->BufWriteTimeoutMax
= 0; /* Not supported */
239 static void fixup_st_m28w320cb(struct mtd_info
*mtd
, void* param
)
241 struct map_info
*map
= mtd
->priv
;
242 struct cfi_private
*cfi
= map
->fldrv_priv
;
244 /* Note this is done after the region info is endian swapped */
245 cfi
->cfiq
->EraseRegionInfo
[1] =
246 (cfi
->cfiq
->EraseRegionInfo
[1] & 0xffff0000) | 0x3e;
249 static void fixup_use_point(struct mtd_info
*mtd
, void *param
)
251 struct map_info
*map
= mtd
->priv
;
252 if (!mtd
->point
&& map_is_linear(map
)) {
253 mtd
->point
= cfi_intelext_point
;
254 mtd
->unpoint
= cfi_intelext_unpoint
;
258 static void fixup_use_write_buffers(struct mtd_info
*mtd
, void *param
)
260 struct map_info
*map
= mtd
->priv
;
261 struct cfi_private
*cfi
= map
->fldrv_priv
;
262 if (cfi
->cfiq
->BufWriteTimeoutTyp
) {
263 printk(KERN_INFO
"Using buffer write method\n" );
264 mtd
->write
= cfi_intelext_write_buffers
;
265 mtd
->writev
= cfi_intelext_writev
;
270 * Some chips power-up with all sectors locked by default.
272 static void fixup_unlock_powerup_lock(struct mtd_info
*mtd
, void *param
)
274 struct map_info
*map
= mtd
->priv
;
275 struct cfi_private
*cfi
= map
->fldrv_priv
;
276 struct cfi_pri_intelext
*cfip
= cfi
->cmdset_priv
;
278 if (cfip
->FeatureSupport
&32) {
279 printk(KERN_INFO
"Using auto-unlock on power-up/resume\n" );
280 mtd
->flags
|= MTD_POWERUP_LOCK
;
284 static struct cfi_fixup cfi_fixup_table
[] = {
285 { CFI_MFR_ATMEL
, CFI_ID_ANY
, fixup_convert_atmel_pri
, NULL
},
286 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
287 { CFI_MFR_ANY
, CFI_ID_ANY
, fixup_intel_strataflash
, NULL
},
289 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
290 { CFI_MFR_ANY
, CFI_ID_ANY
, fixup_no_write_suspend
, NULL
},
292 #if !FORCE_WORD_WRITE
293 { CFI_MFR_ANY
, CFI_ID_ANY
, fixup_use_write_buffers
, NULL
},
295 { CFI_MFR_ST
, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct
, NULL
},
296 { CFI_MFR_ST
, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb
, NULL
},
297 { MANUFACTURER_INTEL
, CFI_ID_ANY
, fixup_unlock_powerup_lock
, NULL
, },
301 static struct cfi_fixup jedec_fixup_table
[] = {
302 { MANUFACTURER_INTEL
, I82802AB
, fixup_use_fwh_lock
, NULL
, },
303 { MANUFACTURER_INTEL
, I82802AC
, fixup_use_fwh_lock
, NULL
, },
304 { MANUFACTURER_ST
, M50LPW080
, fixup_use_fwh_lock
, NULL
, },
305 { MANUFACTURER_ST
, M50FLW080A
, fixup_use_fwh_lock
, NULL
, },
306 { MANUFACTURER_ST
, M50FLW080B
, fixup_use_fwh_lock
, NULL
, },
309 static struct cfi_fixup fixup_table
[] = {
310 /* The CFI vendor ids and the JEDEC vendor IDs appear
311 * to be common. It is like the devices id's are as
312 * well. This table is to pick all cases where
313 * we know that is the case.
315 { CFI_MFR_ANY
, CFI_ID_ANY
, fixup_use_point
, NULL
},
319 static void cfi_fixup_major_minor(struct cfi_private
*cfi
,
320 struct cfi_pri_intelext
*extp
)
322 if (cfi
->mfr
== MANUFACTURER_INTEL
&&
323 cfi
->id
== PF38F4476
&& extp
->MinorVersion
== '3')
324 extp
->MinorVersion
= '1';
327 static inline struct cfi_pri_intelext
*
328 read_pri_intelext(struct map_info
*map
, __u16 adr
)
330 struct cfi_private
*cfi
= map
->fldrv_priv
;
331 struct cfi_pri_intelext
*extp
;
332 unsigned int extra_size
= 0;
333 unsigned int extp_size
= sizeof(*extp
);
336 extp
= (struct cfi_pri_intelext
*)cfi_read_pri(map
, adr
, extp_size
, "Intel/Sharp");
340 cfi_fixup_major_minor(cfi
, extp
);
342 if (extp
->MajorVersion
!= '1' ||
343 (extp
->MinorVersion
< '0' || extp
->MinorVersion
> '5')) {
344 printk(KERN_ERR
" Unknown Intel/Sharp Extended Query "
345 "version %c.%c.\n", extp
->MajorVersion
,
351 /* Do some byteswapping if necessary */
352 extp
->FeatureSupport
= le32_to_cpu(extp
->FeatureSupport
);
353 extp
->BlkStatusRegMask
= le16_to_cpu(extp
->BlkStatusRegMask
);
354 extp
->ProtRegAddr
= le16_to_cpu(extp
->ProtRegAddr
);
356 if (extp
->MinorVersion
>= '0') {
359 /* Protection Register info */
360 extra_size
+= (extp
->NumProtectionFields
- 1) *
361 sizeof(struct cfi_intelext_otpinfo
);
364 if (extp
->MinorVersion
>= '1') {
365 /* Burst Read info */
367 if (extp_size
< sizeof(*extp
) + extra_size
)
369 extra_size
+= extp
->extra
[extra_size
- 1];
372 if (extp
->MinorVersion
>= '3') {
375 /* Number of hardware-partitions */
377 if (extp_size
< sizeof(*extp
) + extra_size
)
379 nb_parts
= extp
->extra
[extra_size
- 1];
381 /* skip the sizeof(partregion) field in CFI 1.4 */
382 if (extp
->MinorVersion
>= '4')
385 for (i
= 0; i
< nb_parts
; i
++) {
386 struct cfi_intelext_regioninfo
*rinfo
;
387 rinfo
= (struct cfi_intelext_regioninfo
*)&extp
->extra
[extra_size
];
388 extra_size
+= sizeof(*rinfo
);
389 if (extp_size
< sizeof(*extp
) + extra_size
)
391 rinfo
->NumIdentPartitions
=le16_to_cpu(rinfo
->NumIdentPartitions
);
392 extra_size
+= (rinfo
->NumBlockTypes
- 1)
393 * sizeof(struct cfi_intelext_blockinfo
);
396 if (extp
->MinorVersion
>= '4')
397 extra_size
+= sizeof(struct cfi_intelext_programming_regioninfo
);
399 if (extp_size
< sizeof(*extp
) + extra_size
) {
401 extp_size
= sizeof(*extp
) + extra_size
;
403 if (extp_size
> 4096) {
405 "%s: cfi_pri_intelext is too fat\n",
416 struct mtd_info
*cfi_cmdset_0001(struct map_info
*map
, int primary
)
418 struct cfi_private
*cfi
= map
->fldrv_priv
;
419 struct mtd_info
*mtd
;
422 mtd
= kzalloc(sizeof(*mtd
), GFP_KERNEL
);
424 printk(KERN_ERR
"Failed to allocate memory for MTD device\n");
428 mtd
->type
= MTD_NORFLASH
;
430 /* Fill in the default mtd operations */
431 mtd
->erase
= cfi_intelext_erase_varsize
;
432 mtd
->read
= cfi_intelext_read
;
433 mtd
->write
= cfi_intelext_write_words
;
434 mtd
->sync
= cfi_intelext_sync
;
435 mtd
->lock
= cfi_intelext_lock
;
436 mtd
->unlock
= cfi_intelext_unlock
;
437 mtd
->suspend
= cfi_intelext_suspend
;
438 mtd
->resume
= cfi_intelext_resume
;
439 mtd
->flags
= MTD_CAP_NORFLASH
;
440 mtd
->name
= map
->name
;
443 mtd
->reboot_notifier
.notifier_call
= cfi_intelext_reboot
;
445 if (cfi
->cfi_mode
== CFI_MODE_CFI
) {
447 * It's a real CFI chip, not one for which the probe
448 * routine faked a CFI structure. So we read the feature
451 __u16 adr
= primary
?cfi
->cfiq
->P_ADR
:cfi
->cfiq
->A_ADR
;
452 struct cfi_pri_intelext
*extp
;
454 extp
= read_pri_intelext(map
, adr
);
460 /* Install our own private info structure */
461 cfi
->cmdset_priv
= extp
;
463 cfi_fixup(mtd
, cfi_fixup_table
);
465 #ifdef DEBUG_CFI_FEATURES
466 /* Tell the user about it in lots of lovely detail */
467 cfi_tell_features(extp
);
470 if(extp
->SuspendCmdSupport
& 1) {
471 printk(KERN_NOTICE
"cfi_cmdset_0001: Erase suspend on write enabled\n");
474 else if (cfi
->cfi_mode
== CFI_MODE_JEDEC
) {
475 /* Apply jedec specific fixups */
476 cfi_fixup(mtd
, jedec_fixup_table
);
478 /* Apply generic fixups */
479 cfi_fixup(mtd
, fixup_table
);
481 for (i
=0; i
< cfi
->numchips
; i
++) {
482 if (cfi
->cfiq
->WordWriteTimeoutTyp
)
483 cfi
->chips
[i
].word_write_time
=
484 1<<cfi
->cfiq
->WordWriteTimeoutTyp
;
486 cfi
->chips
[i
].word_write_time
= 50000;
488 if (cfi
->cfiq
->BufWriteTimeoutTyp
)
489 cfi
->chips
[i
].buffer_write_time
=
490 1<<cfi
->cfiq
->BufWriteTimeoutTyp
;
491 /* No default; if it isn't specified, we won't use it */
493 if (cfi
->cfiq
->BlockEraseTimeoutTyp
)
494 cfi
->chips
[i
].erase_time
=
495 1000<<cfi
->cfiq
->BlockEraseTimeoutTyp
;
497 cfi
->chips
[i
].erase_time
= 2000000;
499 if (cfi
->cfiq
->WordWriteTimeoutTyp
&&
500 cfi
->cfiq
->WordWriteTimeoutMax
)
501 cfi
->chips
[i
].word_write_time_max
=
502 1<<(cfi
->cfiq
->WordWriteTimeoutTyp
+
503 cfi
->cfiq
->WordWriteTimeoutMax
);
505 cfi
->chips
[i
].word_write_time_max
= 50000 * 8;
507 if (cfi
->cfiq
->BufWriteTimeoutTyp
&&
508 cfi
->cfiq
->BufWriteTimeoutMax
)
509 cfi
->chips
[i
].buffer_write_time_max
=
510 1<<(cfi
->cfiq
->BufWriteTimeoutTyp
+
511 cfi
->cfiq
->BufWriteTimeoutMax
);
513 if (cfi
->cfiq
->BlockEraseTimeoutTyp
&&
514 cfi
->cfiq
->BlockEraseTimeoutMax
)
515 cfi
->chips
[i
].erase_time_max
=
516 1000<<(cfi
->cfiq
->BlockEraseTimeoutTyp
+
517 cfi
->cfiq
->BlockEraseTimeoutMax
);
519 cfi
->chips
[i
].erase_time_max
= 2000000 * 8;
521 cfi
->chips
[i
].ref_point_counter
= 0;
522 init_waitqueue_head(&(cfi
->chips
[i
].wq
));
525 map
->fldrv
= &cfi_intelext_chipdrv
;
527 return cfi_intelext_setup(mtd
);
529 struct mtd_info
*cfi_cmdset_0003(struct map_info
*map
, int primary
) __attribute__((alias("cfi_cmdset_0001")));
530 struct mtd_info
*cfi_cmdset_0200(struct map_info
*map
, int primary
) __attribute__((alias("cfi_cmdset_0001")));
531 EXPORT_SYMBOL_GPL(cfi_cmdset_0001
);
532 EXPORT_SYMBOL_GPL(cfi_cmdset_0003
);
533 EXPORT_SYMBOL_GPL(cfi_cmdset_0200
);
535 static struct mtd_info
*cfi_intelext_setup(struct mtd_info
*mtd
)
537 struct map_info
*map
= mtd
->priv
;
538 struct cfi_private
*cfi
= map
->fldrv_priv
;
539 unsigned long offset
= 0;
541 unsigned long devsize
= (1<<cfi
->cfiq
->DevSize
) * cfi
->interleave
;
543 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
545 mtd
->size
= devsize
* cfi
->numchips
;
547 mtd
->numeraseregions
= cfi
->cfiq
->NumEraseRegions
* cfi
->numchips
;
548 mtd
->eraseregions
= kmalloc(sizeof(struct mtd_erase_region_info
)
549 * mtd
->numeraseregions
, GFP_KERNEL
);
550 if (!mtd
->eraseregions
) {
551 printk(KERN_ERR
"Failed to allocate memory for MTD erase region info\n");
555 for (i
=0; i
<cfi
->cfiq
->NumEraseRegions
; i
++) {
556 unsigned long ernum
, ersize
;
557 ersize
= ((cfi
->cfiq
->EraseRegionInfo
[i
] >> 8) & ~0xff) * cfi
->interleave
;
558 ernum
= (cfi
->cfiq
->EraseRegionInfo
[i
] & 0xffff) + 1;
560 if (mtd
->erasesize
< ersize
) {
561 mtd
->erasesize
= ersize
;
563 for (j
=0; j
<cfi
->numchips
; j
++) {
564 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].offset
= (j
*devsize
)+offset
;
565 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].erasesize
= ersize
;
566 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].numblocks
= ernum
;
567 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].lockmap
= kmalloc(ernum
/ 8 + 1, GFP_KERNEL
);
569 offset
+= (ersize
* ernum
);
572 if (offset
!= devsize
) {
574 printk(KERN_WARNING
"Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset
, devsize
);
578 for (i
=0; i
<mtd
->numeraseregions
;i
++){
579 printk(KERN_DEBUG
"erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
580 i
,(unsigned long long)mtd
->eraseregions
[i
].offset
,
581 mtd
->eraseregions
[i
].erasesize
,
582 mtd
->eraseregions
[i
].numblocks
);
585 #ifdef CONFIG_MTD_OTP
586 mtd
->read_fact_prot_reg
= cfi_intelext_read_fact_prot_reg
;
587 mtd
->read_user_prot_reg
= cfi_intelext_read_user_prot_reg
;
588 mtd
->write_user_prot_reg
= cfi_intelext_write_user_prot_reg
;
589 mtd
->lock_user_prot_reg
= cfi_intelext_lock_user_prot_reg
;
590 mtd
->get_fact_prot_info
= cfi_intelext_get_fact_prot_info
;
591 mtd
->get_user_prot_info
= cfi_intelext_get_user_prot_info
;
594 /* This function has the potential to distort the reality
595 a bit and therefore should be called last. */
596 if (cfi_intelext_partition_fixup(mtd
, &cfi
) != 0)
599 __module_get(THIS_MODULE
);
600 register_reboot_notifier(&mtd
->reboot_notifier
);
605 kfree(mtd
->eraseregions
);
608 kfree(cfi
->cmdset_priv
);
612 static int cfi_intelext_partition_fixup(struct mtd_info
*mtd
,
613 struct cfi_private
**pcfi
)
615 struct map_info
*map
= mtd
->priv
;
616 struct cfi_private
*cfi
= *pcfi
;
617 struct cfi_pri_intelext
*extp
= cfi
->cmdset_priv
;
620 * Probing of multi-partition flash chips.
622 * To support multiple partitions when available, we simply arrange
623 * for each of them to have their own flchip structure even if they
624 * are on the same physical chip. This means completely recreating
625 * a new cfi_private structure right here which is a blatent code
626 * layering violation, but this is still the least intrusive
627 * arrangement at this point. This can be rearranged in the future
628 * if someone feels motivated enough. --nico
630 if (extp
&& extp
->MajorVersion
== '1' && extp
->MinorVersion
>= '3'
631 && extp
->FeatureSupport
& (1 << 9)) {
632 struct cfi_private
*newcfi
;
634 struct flchip_shared
*shared
;
635 int offs
, numregions
, numparts
, partshift
, numvirtchips
, i
, j
;
637 /* Protection Register info */
638 offs
= (extp
->NumProtectionFields
- 1) *
639 sizeof(struct cfi_intelext_otpinfo
);
641 /* Burst Read info */
642 offs
+= extp
->extra
[offs
+1]+2;
644 /* Number of partition regions */
645 numregions
= extp
->extra
[offs
];
648 /* skip the sizeof(partregion) field in CFI 1.4 */
649 if (extp
->MinorVersion
>= '4')
652 /* Number of hardware partitions */
654 for (i
= 0; i
< numregions
; i
++) {
655 struct cfi_intelext_regioninfo
*rinfo
;
656 rinfo
= (struct cfi_intelext_regioninfo
*)&extp
->extra
[offs
];
657 numparts
+= rinfo
->NumIdentPartitions
;
658 offs
+= sizeof(*rinfo
)
659 + (rinfo
->NumBlockTypes
- 1) *
660 sizeof(struct cfi_intelext_blockinfo
);
666 /* Programming Region info */
667 if (extp
->MinorVersion
>= '4') {
668 struct cfi_intelext_programming_regioninfo
*prinfo
;
669 prinfo
= (struct cfi_intelext_programming_regioninfo
*)&extp
->extra
[offs
];
670 mtd
->writesize
= cfi
->interleave
<< prinfo
->ProgRegShift
;
671 mtd
->flags
&= ~MTD_BIT_WRITEABLE
;
672 printk(KERN_DEBUG
"%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
673 map
->name
, mtd
->writesize
,
674 cfi
->interleave
* prinfo
->ControlValid
,
675 cfi
->interleave
* prinfo
->ControlInvalid
);
679 * All functions below currently rely on all chips having
680 * the same geometry so we'll just assume that all hardware
681 * partitions are of the same size too.
683 partshift
= cfi
->chipshift
- __ffs(numparts
);
685 if ((1 << partshift
) < mtd
->erasesize
) {
687 "%s: bad number of hw partitions (%d)\n",
692 numvirtchips
= cfi
->numchips
* numparts
;
693 newcfi
= kmalloc(sizeof(struct cfi_private
) + numvirtchips
* sizeof(struct flchip
), GFP_KERNEL
);
696 shared
= kmalloc(sizeof(struct flchip_shared
) * cfi
->numchips
, GFP_KERNEL
);
701 memcpy(newcfi
, cfi
, sizeof(struct cfi_private
));
702 newcfi
->numchips
= numvirtchips
;
703 newcfi
->chipshift
= partshift
;
705 chip
= &newcfi
->chips
[0];
706 for (i
= 0; i
< cfi
->numchips
; i
++) {
707 shared
[i
].writing
= shared
[i
].erasing
= NULL
;
708 spin_lock_init(&shared
[i
].lock
);
709 for (j
= 0; j
< numparts
; j
++) {
710 *chip
= cfi
->chips
[i
];
711 chip
->start
+= j
<< partshift
;
712 chip
->priv
= &shared
[i
];
713 /* those should be reset too since
714 they create memory references. */
715 init_waitqueue_head(&chip
->wq
);
716 spin_lock_init(&chip
->_spinlock
);
717 chip
->mutex
= &chip
->_spinlock
;
722 printk(KERN_DEBUG
"%s: %d set(s) of %d interleaved chips "
723 "--> %d partitions of %d KiB\n",
724 map
->name
, cfi
->numchips
, cfi
->interleave
,
725 newcfi
->numchips
, 1<<(newcfi
->chipshift
-10));
727 map
->fldrv_priv
= newcfi
;
736 * *********** CHIP ACCESS FUNCTIONS ***********
738 static int chip_ready (struct map_info
*map
, struct flchip
*chip
, unsigned long adr
, int mode
)
740 DECLARE_WAITQUEUE(wait
, current
);
741 struct cfi_private
*cfi
= map
->fldrv_priv
;
742 map_word status
, status_OK
= CMD(0x80), status_PWS
= CMD(0x01);
743 struct cfi_pri_intelext
*cfip
= cfi
->cmdset_priv
;
744 unsigned long timeo
= jiffies
+ HZ
;
746 /* Prevent setting state FL_SYNCING for chip in suspended state. */
747 if (mode
== FL_SYNCING
&& chip
->oldstate
!= FL_READY
)
750 switch (chip
->state
) {
754 status
= map_read(map
, adr
);
755 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
758 /* At this point we're fine with write operations
759 in other partitions as they don't conflict. */
760 if (chip
->priv
&& map_word_andequal(map
, status
, status_PWS
, status_PWS
))
763 spin_unlock(chip
->mutex
);
765 spin_lock(chip
->mutex
);
766 /* Someone else might have been playing with it. */
777 !(cfip
->FeatureSupport
& 2) ||
778 !(mode
== FL_READY
|| mode
== FL_POINT
||
779 (mode
== FL_WRITING
&& (cfip
->SuspendCmdSupport
& 1))))
784 map_write(map
, CMD(0xB0), adr
);
786 /* If the flash has finished erasing, then 'erase suspend'
787 * appears to make some (28F320) flash devices switch to
788 * 'read' mode. Make sure that we switch to 'read status'
789 * mode so we get the right data. --rmk
791 map_write(map
, CMD(0x70), adr
);
792 chip
->oldstate
= FL_ERASING
;
793 chip
->state
= FL_ERASE_SUSPENDING
;
794 chip
->erase_suspended
= 1;
796 status
= map_read(map
, adr
);
797 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
800 if (time_after(jiffies
, timeo
)) {
801 /* Urgh. Resume and pretend we weren't here. */
802 map_write(map
, CMD(0xd0), adr
);
803 /* Make sure we're in 'read status' mode if it had finished */
804 map_write(map
, CMD(0x70), adr
);
805 chip
->state
= FL_ERASING
;
806 chip
->oldstate
= FL_READY
;
807 printk(KERN_ERR
"%s: Chip not ready after erase "
808 "suspended: status = 0x%lx\n", map
->name
, status
.x
[0]);
812 spin_unlock(chip
->mutex
);
814 spin_lock(chip
->mutex
);
815 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
816 So we can just loop here. */
818 chip
->state
= FL_STATUS
;
821 case FL_XIP_WHILE_ERASING
:
822 if (mode
!= FL_READY
&& mode
!= FL_POINT
&&
823 (mode
!= FL_WRITING
|| !cfip
|| !(cfip
->SuspendCmdSupport
&1)))
825 chip
->oldstate
= chip
->state
;
826 chip
->state
= FL_READY
;
830 /* The machine is rebooting now,so no one can get chip anymore */
833 /* Only if there's no operation suspended... */
834 if (mode
== FL_READY
&& chip
->oldstate
== FL_READY
)
839 set_current_state(TASK_UNINTERRUPTIBLE
);
840 add_wait_queue(&chip
->wq
, &wait
);
841 spin_unlock(chip
->mutex
);
843 remove_wait_queue(&chip
->wq
, &wait
);
844 spin_lock(chip
->mutex
);
849 static int get_chip(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
, int mode
)
852 DECLARE_WAITQUEUE(wait
, current
);
856 (mode
== FL_WRITING
|| mode
== FL_ERASING
|| mode
== FL_OTP_WRITE
857 || mode
== FL_SHUTDOWN
) && chip
->state
!= FL_SYNCING
) {
859 * OK. We have possibility for contention on the write/erase
860 * operations which are global to the real chip and not per
861 * partition. So let's fight it over in the partition which
862 * currently has authority on the operation.
864 * The rules are as follows:
866 * - any write operation must own shared->writing.
868 * - any erase operation must own _both_ shared->writing and
871 * - contention arbitration is handled in the owner's context.
873 * The 'shared' struct can be read and/or written only when
876 struct flchip_shared
*shared
= chip
->priv
;
877 struct flchip
*contender
;
878 spin_lock(&shared
->lock
);
879 contender
= shared
->writing
;
880 if (contender
&& contender
!= chip
) {
882 * The engine to perform desired operation on this
883 * partition is already in use by someone else.
884 * Let's fight over it in the context of the chip
885 * currently using it. If it is possible to suspend,
886 * that other partition will do just that, otherwise
887 * it'll happily send us to sleep. In any case, when
888 * get_chip returns success we're clear to go ahead.
890 ret
= spin_trylock(contender
->mutex
);
891 spin_unlock(&shared
->lock
);
894 spin_unlock(chip
->mutex
);
895 ret
= chip_ready(map
, contender
, contender
->start
, mode
);
896 spin_lock(chip
->mutex
);
898 if (ret
== -EAGAIN
) {
899 spin_unlock(contender
->mutex
);
903 spin_unlock(contender
->mutex
);
906 spin_lock(&shared
->lock
);
908 /* We should not own chip if it is already
909 * in FL_SYNCING state. Put contender and retry. */
910 if (chip
->state
== FL_SYNCING
) {
911 put_chip(map
, contender
, contender
->start
);
912 spin_unlock(contender
->mutex
);
915 spin_unlock(contender
->mutex
);
918 /* Check if we already have suspended erase
919 * on this chip. Sleep. */
920 if (mode
== FL_ERASING
&& shared
->erasing
921 && shared
->erasing
->oldstate
== FL_ERASING
) {
922 spin_unlock(&shared
->lock
);
923 set_current_state(TASK_UNINTERRUPTIBLE
);
924 add_wait_queue(&chip
->wq
, &wait
);
925 spin_unlock(chip
->mutex
);
927 remove_wait_queue(&chip
->wq
, &wait
);
928 spin_lock(chip
->mutex
);
933 shared
->writing
= chip
;
934 if (mode
== FL_ERASING
)
935 shared
->erasing
= chip
;
936 spin_unlock(&shared
->lock
);
938 ret
= chip_ready(map
, chip
, adr
, mode
);
945 static void put_chip(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
)
947 struct cfi_private
*cfi
= map
->fldrv_priv
;
950 struct flchip_shared
*shared
= chip
->priv
;
951 spin_lock(&shared
->lock
);
952 if (shared
->writing
== chip
&& chip
->oldstate
== FL_READY
) {
953 /* We own the ability to write, but we're done */
954 shared
->writing
= shared
->erasing
;
955 if (shared
->writing
&& shared
->writing
!= chip
) {
956 /* give back ownership to who we loaned it from */
957 struct flchip
*loaner
= shared
->writing
;
958 spin_lock(loaner
->mutex
);
959 spin_unlock(&shared
->lock
);
960 spin_unlock(chip
->mutex
);
961 put_chip(map
, loaner
, loaner
->start
);
962 spin_lock(chip
->mutex
);
963 spin_unlock(loaner
->mutex
);
967 shared
->erasing
= NULL
;
968 shared
->writing
= NULL
;
969 } else if (shared
->erasing
== chip
&& shared
->writing
!= chip
) {
971 * We own the ability to erase without the ability
972 * to write, which means the erase was suspended
973 * and some other partition is currently writing.
974 * Don't let the switch below mess things up since
975 * we don't have ownership to resume anything.
977 spin_unlock(&shared
->lock
);
981 spin_unlock(&shared
->lock
);
984 switch(chip
->oldstate
) {
986 chip
->state
= chip
->oldstate
;
987 /* What if one interleaved chip has finished and the
988 other hasn't? The old code would leave the finished
989 one in READY mode. That's bad, and caused -EROFS
990 errors to be returned from do_erase_oneblock because
991 that's the only bit it checked for at the time.
992 As the state machine appears to explicitly allow
993 sending the 0x70 (Read Status) command to an erasing
994 chip and expecting it to be ignored, that's what we
996 map_write(map
, CMD(0xd0), adr
);
997 map_write(map
, CMD(0x70), adr
);
998 chip
->oldstate
= FL_READY
;
999 chip
->state
= FL_ERASING
;
1002 case FL_XIP_WHILE_ERASING
:
1003 chip
->state
= chip
->oldstate
;
1004 chip
->oldstate
= FL_READY
;
1009 case FL_JEDEC_QUERY
:
1010 /* We should really make set_vpp() count, rather than doing this */
1014 printk(KERN_ERR
"%s: put_chip() called with oldstate %d!!\n", map
->name
, chip
->oldstate
);
1019 #ifdef CONFIG_MTD_XIP
1022 * No interrupt what so ever can be serviced while the flash isn't in array
1023 * mode. This is ensured by the xip_disable() and xip_enable() functions
1024 * enclosing any code path where the flash is known not to be in array mode.
1025 * And within a XIP disabled code path, only functions marked with __xipram
1026 * may be called and nothing else (it's a good thing to inspect generated
1027 * assembly to make sure inline functions were actually inlined and that gcc
1028 * didn't emit calls to its own support functions). Also configuring MTD CFI
1029 * support to a single buswidth and a single interleave is also recommended.
1032 static void xip_disable(struct map_info
*map
, struct flchip
*chip
,
1035 /* TODO: chips with no XIP use should ignore and return */
1036 (void) map_read(map
, adr
); /* ensure mmu mapping is up to date */
1037 local_irq_disable();
1040 static void __xipram
xip_enable(struct map_info
*map
, struct flchip
*chip
,
1043 struct cfi_private
*cfi
= map
->fldrv_priv
;
1044 if (chip
->state
!= FL_POINT
&& chip
->state
!= FL_READY
) {
1045 map_write(map
, CMD(0xff), adr
);
1046 chip
->state
= FL_READY
;
1048 (void) map_read(map
, adr
);
1054 * When a delay is required for the flash operation to complete, the
1055 * xip_wait_for_operation() function is polling for both the given timeout
1056 * and pending (but still masked) hardware interrupts. Whenever there is an
1057 * interrupt pending then the flash erase or write operation is suspended,
1058 * array mode restored and interrupts unmasked. Task scheduling might also
1059 * happen at that point. The CPU eventually returns from the interrupt or
1060 * the call to schedule() and the suspended flash operation is resumed for
1061 * the remaining of the delay period.
1063 * Warning: this function _will_ fool interrupt latency tracing tools.
1066 static int __xipram
xip_wait_for_operation(
1067 struct map_info
*map
, struct flchip
*chip
,
1068 unsigned long adr
, unsigned int chip_op_time_max
)
1070 struct cfi_private
*cfi
= map
->fldrv_priv
;
1071 struct cfi_pri_intelext
*cfip
= cfi
->cmdset_priv
;
1072 map_word status
, OK
= CMD(0x80);
1073 unsigned long usec
, suspended
, start
, done
;
1074 flstate_t oldstate
, newstate
;
1076 start
= xip_currtime();
1077 usec
= chip_op_time_max
;
1084 if (xip_irqpending() && cfip
&&
1085 ((chip
->state
== FL_ERASING
&& (cfip
->FeatureSupport
&2)) ||
1086 (chip
->state
== FL_WRITING
&& (cfip
->FeatureSupport
&4))) &&
1087 (cfi_interleave_is_1(cfi
) || chip
->oldstate
== FL_READY
)) {
1089 * Let's suspend the erase or write operation when
1090 * supported. Note that we currently don't try to
1091 * suspend interleaved chips if there is already
1092 * another operation suspended (imagine what happens
1093 * when one chip was already done with the current
1094 * operation while another chip suspended it, then
1095 * we resume the whole thing at once). Yes, it
1099 map_write(map
, CMD(0xb0), adr
);
1100 map_write(map
, CMD(0x70), adr
);
1101 suspended
= xip_currtime();
1103 if (xip_elapsed_since(suspended
) > 100000) {
1105 * The chip doesn't want to suspend
1106 * after waiting for 100 msecs.
1107 * This is a critical error but there
1108 * is not much we can do here.
1112 status
= map_read(map
, adr
);
1113 } while (!map_word_andequal(map
, status
, OK
, OK
));
1115 /* Suspend succeeded */
1116 oldstate
= chip
->state
;
1117 if (oldstate
== FL_ERASING
) {
1118 if (!map_word_bitsset(map
, status
, CMD(0x40)))
1120 newstate
= FL_XIP_WHILE_ERASING
;
1121 chip
->erase_suspended
= 1;
1123 if (!map_word_bitsset(map
, status
, CMD(0x04)))
1125 newstate
= FL_XIP_WHILE_WRITING
;
1126 chip
->write_suspended
= 1;
1128 chip
->state
= newstate
;
1129 map_write(map
, CMD(0xff), adr
);
1130 (void) map_read(map
, adr
);
1133 spin_unlock(chip
->mutex
);
1138 * We're back. However someone else might have
1139 * decided to go write to the chip if we are in
1140 * a suspended erase state. If so let's wait
1143 spin_lock(chip
->mutex
);
1144 while (chip
->state
!= newstate
) {
1145 DECLARE_WAITQUEUE(wait
, current
);
1146 set_current_state(TASK_UNINTERRUPTIBLE
);
1147 add_wait_queue(&chip
->wq
, &wait
);
1148 spin_unlock(chip
->mutex
);
1150 remove_wait_queue(&chip
->wq
, &wait
);
1151 spin_lock(chip
->mutex
);
1153 /* Disallow XIP again */
1154 local_irq_disable();
1156 /* Resume the write or erase operation */
1157 map_write(map
, CMD(0xd0), adr
);
1158 map_write(map
, CMD(0x70), adr
);
1159 chip
->state
= oldstate
;
1160 start
= xip_currtime();
1161 } else if (usec
>= 1000000/HZ
) {
1163 * Try to save on CPU power when waiting delay
1164 * is at least a system timer tick period.
1165 * No need to be extremely accurate here.
1169 status
= map_read(map
, adr
);
1170 done
= xip_elapsed_since(start
);
1171 } while (!map_word_andequal(map
, status
, OK
, OK
)
1174 return (done
>= usec
) ? -ETIME
: 0;
1178 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1179 * the flash is actively programming or erasing since we have to poll for
1180 * the operation to complete anyway. We can't do that in a generic way with
1181 * a XIP setup so do it before the actual flash operation in this case
1182 * and stub it out from INVAL_CACHE_AND_WAIT.
1184 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1185 INVALIDATE_CACHED_RANGE(map, from, size)
1187 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1188 xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1192 #define xip_disable(map, chip, adr)
1193 #define xip_enable(map, chip, adr)
1194 #define XIP_INVAL_CACHED_RANGE(x...)
1195 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1197 static int inval_cache_and_wait_for_operation(
1198 struct map_info
*map
, struct flchip
*chip
,
1199 unsigned long cmd_adr
, unsigned long inval_adr
, int inval_len
,
1200 unsigned int chip_op_time
, unsigned int chip_op_time_max
)
1202 struct cfi_private
*cfi
= map
->fldrv_priv
;
1203 map_word status
, status_OK
= CMD(0x80);
1204 int chip_state
= chip
->state
;
1205 unsigned int timeo
, sleep_time
, reset_timeo
;
1207 spin_unlock(chip
->mutex
);
1209 INVALIDATE_CACHED_RANGE(map
, inval_adr
, inval_len
);
1210 spin_lock(chip
->mutex
);
1212 timeo
= chip_op_time_max
;
1215 reset_timeo
= timeo
;
1216 sleep_time
= chip_op_time
/ 2;
1219 status
= map_read(map
, cmd_adr
);
1220 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1224 map_write(map
, CMD(0x70), cmd_adr
);
1225 chip
->state
= FL_STATUS
;
1229 /* OK Still waiting. Drop the lock, wait a while and retry. */
1230 spin_unlock(chip
->mutex
);
1231 if (sleep_time
>= 1000000/HZ
) {
1233 * Half of the normal delay still remaining
1234 * can be performed with a sleeping delay instead
1237 msleep(sleep_time
/1000);
1238 timeo
-= sleep_time
;
1239 sleep_time
= 1000000/HZ
;
1245 spin_lock(chip
->mutex
);
1247 while (chip
->state
!= chip_state
) {
1248 /* Someone's suspended the operation: sleep */
1249 DECLARE_WAITQUEUE(wait
, current
);
1250 set_current_state(TASK_UNINTERRUPTIBLE
);
1251 add_wait_queue(&chip
->wq
, &wait
);
1252 spin_unlock(chip
->mutex
);
1254 remove_wait_queue(&chip
->wq
, &wait
);
1255 spin_lock(chip
->mutex
);
1257 if (chip
->erase_suspended
&& chip_state
== FL_ERASING
) {
1258 /* Erase suspend occured while sleep: reset timeout */
1259 timeo
= reset_timeo
;
1260 chip
->erase_suspended
= 0;
1262 if (chip
->write_suspended
&& chip_state
== FL_WRITING
) {
1263 /* Write suspend occured while sleep: reset timeout */
1264 timeo
= reset_timeo
;
1265 chip
->write_suspended
= 0;
1269 /* Done and happy. */
1270 chip
->state
= FL_STATUS
;
1276 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1277 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1280 static int do_point_onechip (struct map_info
*map
, struct flchip
*chip
, loff_t adr
, size_t len
)
1282 unsigned long cmd_addr
;
1283 struct cfi_private
*cfi
= map
->fldrv_priv
;
1288 /* Ensure cmd read/writes are aligned. */
1289 cmd_addr
= adr
& ~(map_bankwidth(map
)-1);
1291 spin_lock(chip
->mutex
);
1293 ret
= get_chip(map
, chip
, cmd_addr
, FL_POINT
);
1296 if (chip
->state
!= FL_POINT
&& chip
->state
!= FL_READY
)
1297 map_write(map
, CMD(0xff), cmd_addr
);
1299 chip
->state
= FL_POINT
;
1300 chip
->ref_point_counter
++;
1302 spin_unlock(chip
->mutex
);
1307 static int cfi_intelext_point(struct mtd_info
*mtd
, loff_t from
, size_t len
,
1308 size_t *retlen
, void **virt
, resource_size_t
*phys
)
1310 struct map_info
*map
= mtd
->priv
;
1311 struct cfi_private
*cfi
= map
->fldrv_priv
;
1312 unsigned long ofs
, last_end
= 0;
1316 if (!map
->virt
|| (from
+ len
> mtd
->size
))
1319 /* Now lock the chip(s) to POINT state */
1321 /* ofs: offset within the first chip that the first read should start */
1322 chipnum
= (from
>> cfi
->chipshift
);
1323 ofs
= from
- (chipnum
<< cfi
->chipshift
);
1325 *virt
= map
->virt
+ cfi
->chips
[chipnum
].start
+ ofs
;
1328 *phys
= map
->phys
+ cfi
->chips
[chipnum
].start
+ ofs
;
1331 unsigned long thislen
;
1333 if (chipnum
>= cfi
->numchips
)
1336 /* We cannot point across chips that are virtually disjoint */
1338 last_end
= cfi
->chips
[chipnum
].start
;
1339 else if (cfi
->chips
[chipnum
].start
!= last_end
)
1342 if ((len
+ ofs
-1) >> cfi
->chipshift
)
1343 thislen
= (1<<cfi
->chipshift
) - ofs
;
1347 ret
= do_point_onechip(map
, &cfi
->chips
[chipnum
], ofs
, thislen
);
1355 last_end
+= 1 << cfi
->chipshift
;
1361 static void cfi_intelext_unpoint(struct mtd_info
*mtd
, loff_t from
, size_t len
)
1363 struct map_info
*map
= mtd
->priv
;
1364 struct cfi_private
*cfi
= map
->fldrv_priv
;
1368 /* Now unlock the chip(s) POINT state */
1370 /* ofs: offset within the first chip that the first read should start */
1371 chipnum
= (from
>> cfi
->chipshift
);
1372 ofs
= from
- (chipnum
<< cfi
->chipshift
);
1375 unsigned long thislen
;
1376 struct flchip
*chip
;
1378 chip
= &cfi
->chips
[chipnum
];
1379 if (chipnum
>= cfi
->numchips
)
1382 if ((len
+ ofs
-1) >> cfi
->chipshift
)
1383 thislen
= (1<<cfi
->chipshift
) - ofs
;
1387 spin_lock(chip
->mutex
);
1388 if (chip
->state
== FL_POINT
) {
1389 chip
->ref_point_counter
--;
1390 if(chip
->ref_point_counter
== 0)
1391 chip
->state
= FL_READY
;
1393 printk(KERN_ERR
"%s: Warning: unpoint called on non pointed region\n", map
->name
); /* Should this give an error? */
1395 put_chip(map
, chip
, chip
->start
);
1396 spin_unlock(chip
->mutex
);
1404 static inline int do_read_onechip(struct map_info
*map
, struct flchip
*chip
, loff_t adr
, size_t len
, u_char
*buf
)
1406 unsigned long cmd_addr
;
1407 struct cfi_private
*cfi
= map
->fldrv_priv
;
1412 /* Ensure cmd read/writes are aligned. */
1413 cmd_addr
= adr
& ~(map_bankwidth(map
)-1);
1415 spin_lock(chip
->mutex
);
1416 ret
= get_chip(map
, chip
, cmd_addr
, FL_READY
);
1418 spin_unlock(chip
->mutex
);
1422 if (chip
->state
!= FL_POINT
&& chip
->state
!= FL_READY
) {
1423 map_write(map
, CMD(0xff), cmd_addr
);
1425 chip
->state
= FL_READY
;
1428 map_copy_from(map
, buf
, adr
, len
);
1430 put_chip(map
, chip
, cmd_addr
);
1432 spin_unlock(chip
->mutex
);
1436 static int cfi_intelext_read (struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
1438 struct map_info
*map
= mtd
->priv
;
1439 struct cfi_private
*cfi
= map
->fldrv_priv
;
1444 /* ofs: offset within the first chip that the first read should start */
1445 chipnum
= (from
>> cfi
->chipshift
);
1446 ofs
= from
- (chipnum
<< cfi
->chipshift
);
1451 unsigned long thislen
;
1453 if (chipnum
>= cfi
->numchips
)
1456 if ((len
+ ofs
-1) >> cfi
->chipshift
)
1457 thislen
= (1<<cfi
->chipshift
) - ofs
;
1461 ret
= do_read_onechip(map
, &cfi
->chips
[chipnum
], ofs
, thislen
, buf
);
1475 static int __xipram
do_write_oneword(struct map_info
*map
, struct flchip
*chip
,
1476 unsigned long adr
, map_word datum
, int mode
)
1478 struct cfi_private
*cfi
= map
->fldrv_priv
;
1479 map_word status
, write_cmd
;
1486 write_cmd
= (cfi
->cfiq
->P_ID
!= 0x0200) ? CMD(0x40) : CMD(0x41);
1489 write_cmd
= CMD(0xc0);
1495 spin_lock(chip
->mutex
);
1496 ret
= get_chip(map
, chip
, adr
, mode
);
1498 spin_unlock(chip
->mutex
);
1502 XIP_INVAL_CACHED_RANGE(map
, adr
, map_bankwidth(map
));
1504 xip_disable(map
, chip
, adr
);
1505 map_write(map
, write_cmd
, adr
);
1506 map_write(map
, datum
, adr
);
1509 ret
= INVAL_CACHE_AND_WAIT(map
, chip
, adr
,
1510 adr
, map_bankwidth(map
),
1511 chip
->word_write_time
,
1512 chip
->word_write_time_max
);
1514 xip_enable(map
, chip
, adr
);
1515 printk(KERN_ERR
"%s: word write error (status timeout)\n", map
->name
);
1519 /* check for errors */
1520 status
= map_read(map
, adr
);
1521 if (map_word_bitsset(map
, status
, CMD(0x1a))) {
1522 unsigned long chipstatus
= MERGESTATUS(status
);
1525 map_write(map
, CMD(0x50), adr
);
1526 map_write(map
, CMD(0x70), adr
);
1527 xip_enable(map
, chip
, adr
);
1529 if (chipstatus
& 0x02) {
1531 } else if (chipstatus
& 0x08) {
1532 printk(KERN_ERR
"%s: word write error (bad VPP)\n", map
->name
);
1535 printk(KERN_ERR
"%s: word write error (status 0x%lx)\n", map
->name
, chipstatus
);
1542 xip_enable(map
, chip
, adr
);
1543 out
: put_chip(map
, chip
, adr
);
1544 spin_unlock(chip
->mutex
);
1549 static int cfi_intelext_write_words (struct mtd_info
*mtd
, loff_t to
, size_t len
, size_t *retlen
, const u_char
*buf
)
1551 struct map_info
*map
= mtd
->priv
;
1552 struct cfi_private
*cfi
= map
->fldrv_priv
;
1561 chipnum
= to
>> cfi
->chipshift
;
1562 ofs
= to
- (chipnum
<< cfi
->chipshift
);
1564 /* If it's not bus-aligned, do the first byte write */
1565 if (ofs
& (map_bankwidth(map
)-1)) {
1566 unsigned long bus_ofs
= ofs
& ~(map_bankwidth(map
)-1);
1567 int gap
= ofs
- bus_ofs
;
1571 n
= min_t(int, len
, map_bankwidth(map
)-gap
);
1572 datum
= map_word_ff(map
);
1573 datum
= map_word_load_partial(map
, datum
, buf
, gap
, n
);
1575 ret
= do_write_oneword(map
, &cfi
->chips
[chipnum
],
1576 bus_ofs
, datum
, FL_WRITING
);
1585 if (ofs
>> cfi
->chipshift
) {
1588 if (chipnum
== cfi
->numchips
)
1593 while(len
>= map_bankwidth(map
)) {
1594 map_word datum
= map_word_load(map
, buf
);
1596 ret
= do_write_oneword(map
, &cfi
->chips
[chipnum
],
1597 ofs
, datum
, FL_WRITING
);
1601 ofs
+= map_bankwidth(map
);
1602 buf
+= map_bankwidth(map
);
1603 (*retlen
) += map_bankwidth(map
);
1604 len
-= map_bankwidth(map
);
1606 if (ofs
>> cfi
->chipshift
) {
1609 if (chipnum
== cfi
->numchips
)
1614 if (len
& (map_bankwidth(map
)-1)) {
1617 datum
= map_word_ff(map
);
1618 datum
= map_word_load_partial(map
, datum
, buf
, 0, len
);
1620 ret
= do_write_oneword(map
, &cfi
->chips
[chipnum
],
1621 ofs
, datum
, FL_WRITING
);
1632 static int __xipram
do_write_buffer(struct map_info
*map
, struct flchip
*chip
,
1633 unsigned long adr
, const struct kvec
**pvec
,
1634 unsigned long *pvec_seek
, int len
)
1636 struct cfi_private
*cfi
= map
->fldrv_priv
;
1637 map_word status
, write_cmd
, datum
;
1638 unsigned long cmd_adr
;
1639 int ret
, wbufsize
, word_gap
, words
;
1640 const struct kvec
*vec
;
1641 unsigned long vec_seek
;
1642 unsigned long initial_adr
;
1643 int initial_len
= len
;
1645 wbufsize
= cfi_interleave(cfi
) << cfi
->cfiq
->MaxBufWriteSize
;
1648 cmd_adr
= adr
& ~(wbufsize
-1);
1650 /* Let's determine this according to the interleave only once */
1651 write_cmd
= (cfi
->cfiq
->P_ID
!= 0x0200) ? CMD(0xe8) : CMD(0xe9);
1653 spin_lock(chip
->mutex
);
1654 ret
= get_chip(map
, chip
, cmd_adr
, FL_WRITING
);
1656 spin_unlock(chip
->mutex
);
1660 XIP_INVAL_CACHED_RANGE(map
, initial_adr
, initial_len
);
1662 xip_disable(map
, chip
, cmd_adr
);
1664 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1665 [...], the device will not accept any more Write to Buffer commands".
1666 So we must check here and reset those bits if they're set. Otherwise
1667 we're just pissing in the wind */
1668 if (chip
->state
!= FL_STATUS
) {
1669 map_write(map
, CMD(0x70), cmd_adr
);
1670 chip
->state
= FL_STATUS
;
1672 status
= map_read(map
, cmd_adr
);
1673 if (map_word_bitsset(map
, status
, CMD(0x30))) {
1674 xip_enable(map
, chip
, cmd_adr
);
1675 printk(KERN_WARNING
"SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status
.x
[0]);
1676 xip_disable(map
, chip
, cmd_adr
);
1677 map_write(map
, CMD(0x50), cmd_adr
);
1678 map_write(map
, CMD(0x70), cmd_adr
);
1681 chip
->state
= FL_WRITING_TO_BUFFER
;
1682 map_write(map
, write_cmd
, cmd_adr
);
1683 ret
= WAIT_TIMEOUT(map
, chip
, cmd_adr
, 0, 0);
1685 /* Argh. Not ready for write to buffer */
1686 map_word Xstatus
= map_read(map
, cmd_adr
);
1687 map_write(map
, CMD(0x70), cmd_adr
);
1688 chip
->state
= FL_STATUS
;
1689 status
= map_read(map
, cmd_adr
);
1690 map_write(map
, CMD(0x50), cmd_adr
);
1691 map_write(map
, CMD(0x70), cmd_adr
);
1692 xip_enable(map
, chip
, cmd_adr
);
1693 printk(KERN_ERR
"%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1694 map
->name
, Xstatus
.x
[0], status
.x
[0]);
1698 /* Figure out the number of words to write */
1699 word_gap
= (-adr
& (map_bankwidth(map
)-1));
1700 words
= DIV_ROUND_UP(len
- word_gap
, map_bankwidth(map
));
1704 word_gap
= map_bankwidth(map
) - word_gap
;
1706 datum
= map_word_ff(map
);
1709 /* Write length of data to come */
1710 map_write(map
, CMD(words
), cmd_adr
);
1714 vec_seek
= *pvec_seek
;
1716 int n
= map_bankwidth(map
) - word_gap
;
1717 if (n
> vec
->iov_len
- vec_seek
)
1718 n
= vec
->iov_len
- vec_seek
;
1722 if (!word_gap
&& len
< map_bankwidth(map
))
1723 datum
= map_word_ff(map
);
1725 datum
= map_word_load_partial(map
, datum
,
1726 vec
->iov_base
+ vec_seek
,
1731 if (!len
|| word_gap
== map_bankwidth(map
)) {
1732 map_write(map
, datum
, adr
);
1733 adr
+= map_bankwidth(map
);
1738 if (vec_seek
== vec
->iov_len
) {
1744 *pvec_seek
= vec_seek
;
1747 map_write(map
, CMD(0xd0), cmd_adr
);
1748 chip
->state
= FL_WRITING
;
1750 ret
= INVAL_CACHE_AND_WAIT(map
, chip
, cmd_adr
,
1751 initial_adr
, initial_len
,
1752 chip
->buffer_write_time
,
1753 chip
->buffer_write_time_max
);
1755 map_write(map
, CMD(0x70), cmd_adr
);
1756 chip
->state
= FL_STATUS
;
1757 xip_enable(map
, chip
, cmd_adr
);
1758 printk(KERN_ERR
"%s: buffer write error (status timeout)\n", map
->name
);
1762 /* check for errors */
1763 status
= map_read(map
, cmd_adr
);
1764 if (map_word_bitsset(map
, status
, CMD(0x1a))) {
1765 unsigned long chipstatus
= MERGESTATUS(status
);
1768 map_write(map
, CMD(0x50), cmd_adr
);
1769 map_write(map
, CMD(0x70), cmd_adr
);
1770 xip_enable(map
, chip
, cmd_adr
);
1772 if (chipstatus
& 0x02) {
1774 } else if (chipstatus
& 0x08) {
1775 printk(KERN_ERR
"%s: buffer write error (bad VPP)\n", map
->name
);
1778 printk(KERN_ERR
"%s: buffer write error (status 0x%lx)\n", map
->name
, chipstatus
);
1785 xip_enable(map
, chip
, cmd_adr
);
1786 out
: put_chip(map
, chip
, cmd_adr
);
1787 spin_unlock(chip
->mutex
);
1791 static int cfi_intelext_writev (struct mtd_info
*mtd
, const struct kvec
*vecs
,
1792 unsigned long count
, loff_t to
, size_t *retlen
)
1794 struct map_info
*map
= mtd
->priv
;
1795 struct cfi_private
*cfi
= map
->fldrv_priv
;
1796 int wbufsize
= cfi_interleave(cfi
) << cfi
->cfiq
->MaxBufWriteSize
;
1799 unsigned long ofs
, vec_seek
, i
;
1802 for (i
= 0; i
< count
; i
++)
1803 len
+= vecs
[i
].iov_len
;
1809 chipnum
= to
>> cfi
->chipshift
;
1810 ofs
= to
- (chipnum
<< cfi
->chipshift
);
1814 /* We must not cross write block boundaries */
1815 int size
= wbufsize
- (ofs
& (wbufsize
-1));
1819 ret
= do_write_buffer(map
, &cfi
->chips
[chipnum
],
1820 ofs
, &vecs
, &vec_seek
, size
);
1828 if (ofs
>> cfi
->chipshift
) {
1831 if (chipnum
== cfi
->numchips
)
1835 /* Be nice and reschedule with the chip in a usable state for other
1844 static int cfi_intelext_write_buffers (struct mtd_info
*mtd
, loff_t to
,
1845 size_t len
, size_t *retlen
, const u_char
*buf
)
1849 vec
.iov_base
= (void *) buf
;
1852 return cfi_intelext_writev(mtd
, &vec
, 1, to
, retlen
);
1855 static int __xipram
do_erase_oneblock(struct map_info
*map
, struct flchip
*chip
,
1856 unsigned long adr
, int len
, void *thunk
)
1858 struct cfi_private
*cfi
= map
->fldrv_priv
;
1866 spin_lock(chip
->mutex
);
1867 ret
= get_chip(map
, chip
, adr
, FL_ERASING
);
1869 spin_unlock(chip
->mutex
);
1873 XIP_INVAL_CACHED_RANGE(map
, adr
, len
);
1875 xip_disable(map
, chip
, adr
);
1877 /* Clear the status register first */
1878 map_write(map
, CMD(0x50), adr
);
1881 map_write(map
, CMD(0x20), adr
);
1882 map_write(map
, CMD(0xD0), adr
);
1883 chip
->state
= FL_ERASING
;
1884 chip
->erase_suspended
= 0;
1886 ret
= INVAL_CACHE_AND_WAIT(map
, chip
, adr
,
1889 chip
->erase_time_max
);
1891 map_write(map
, CMD(0x70), adr
);
1892 chip
->state
= FL_STATUS
;
1893 xip_enable(map
, chip
, adr
);
1894 printk(KERN_ERR
"%s: block erase error: (status timeout)\n", map
->name
);
1898 /* We've broken this before. It doesn't hurt to be safe */
1899 map_write(map
, CMD(0x70), adr
);
1900 chip
->state
= FL_STATUS
;
1901 status
= map_read(map
, adr
);
1903 /* check for errors */
1904 if (map_word_bitsset(map
, status
, CMD(0x3a))) {
1905 unsigned long chipstatus
= MERGESTATUS(status
);
1907 /* Reset the error bits */
1908 map_write(map
, CMD(0x50), adr
);
1909 map_write(map
, CMD(0x70), adr
);
1910 xip_enable(map
, chip
, adr
);
1912 if ((chipstatus
& 0x30) == 0x30) {
1913 printk(KERN_ERR
"%s: block erase error: (bad command sequence, status 0x%lx)\n", map
->name
, chipstatus
);
1915 } else if (chipstatus
& 0x02) {
1916 /* Protection bit set */
1918 } else if (chipstatus
& 0x8) {
1920 printk(KERN_ERR
"%s: block erase error: (bad VPP)\n", map
->name
);
1922 } else if (chipstatus
& 0x20 && retries
--) {
1923 printk(KERN_DEBUG
"block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr
, chipstatus
);
1924 put_chip(map
, chip
, adr
);
1925 spin_unlock(chip
->mutex
);
1928 printk(KERN_ERR
"%s: block erase failed at 0x%08lx (status 0x%lx)\n", map
->name
, adr
, chipstatus
);
1935 xip_enable(map
, chip
, adr
);
1936 out
: put_chip(map
, chip
, adr
);
1937 spin_unlock(chip
->mutex
);
1941 static int cfi_intelext_erase_varsize(struct mtd_info
*mtd
, struct erase_info
*instr
)
1943 unsigned long ofs
, len
;
1949 ret
= cfi_varsize_frob(mtd
, do_erase_oneblock
, ofs
, len
, NULL
);
1953 instr
->state
= MTD_ERASE_DONE
;
1954 mtd_erase_callback(instr
);
1959 static void cfi_intelext_sync (struct mtd_info
*mtd
)
1961 struct map_info
*map
= mtd
->priv
;
1962 struct cfi_private
*cfi
= map
->fldrv_priv
;
1964 struct flchip
*chip
;
1967 for (i
=0; !ret
&& i
<cfi
->numchips
; i
++) {
1968 chip
= &cfi
->chips
[i
];
1970 spin_lock(chip
->mutex
);
1971 ret
= get_chip(map
, chip
, chip
->start
, FL_SYNCING
);
1974 chip
->oldstate
= chip
->state
;
1975 chip
->state
= FL_SYNCING
;
1976 /* No need to wake_up() on this state change -
1977 * as the whole point is that nobody can do anything
1978 * with the chip now anyway.
1981 spin_unlock(chip
->mutex
);
1984 /* Unlock the chips again */
1986 for (i
--; i
>=0; i
--) {
1987 chip
= &cfi
->chips
[i
];
1989 spin_lock(chip
->mutex
);
1991 if (chip
->state
== FL_SYNCING
) {
1992 chip
->state
= chip
->oldstate
;
1993 chip
->oldstate
= FL_READY
;
1996 spin_unlock(chip
->mutex
);
2000 static int __xipram
do_getlockstatus_oneblock(struct map_info
*map
,
2001 struct flchip
*chip
,
2003 int len
, void *thunk
)
2005 struct cfi_private
*cfi
= map
->fldrv_priv
;
2006 int status
, ofs_factor
= cfi
->interleave
* cfi
->device_type
;
2009 xip_disable(map
, chip
, adr
+(2*ofs_factor
));
2010 map_write(map
, CMD(0x90), adr
+(2*ofs_factor
));
2011 chip
->state
= FL_JEDEC_QUERY
;
2012 status
= cfi_read_query(map
, adr
+(2*ofs_factor
));
2013 xip_enable(map
, chip
, 0);
2017 #ifdef DEBUG_LOCK_BITS
2018 static int __xipram
do_printlockstatus_oneblock(struct map_info
*map
,
2019 struct flchip
*chip
,
2021 int len
, void *thunk
)
2023 printk(KERN_DEBUG
"block status register for 0x%08lx is %x\n",
2024 adr
, do_getlockstatus_oneblock(map
, chip
, adr
, len
, thunk
));
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int udelay;
	int ret;

	adr += chip->start;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 */
	udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
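/*
 * Illustrative sketch (editor's addition): a typical caller unlocks a
 * region before programming it, via the mtd_info hooks that
 * cfi_intelext_setup() points at the functions above. "my_mtd" is a
 * hypothetical handle, e.g. from get_mtd_device().
 */
#if 0
	ret = my_mtd->unlock(my_mtd, ofs, (uint64_t)my_mtd->erasesize);
	if (!ret)
		ret = my_mtd->write(my_mtd, ofs, len, &retlen, buf);
#endif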
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}
static int __xipram
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}
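/*
 * Editor's worked example for the alignment math above: with
 * map_bankwidth(map) == 4, a write of size 7 at offset 0x0e gives
 * bus_ofs = 0x0e & ~3 = 0x0c, gap = 2 and n = min(7, 4 - 2) = 2, so the
 * first do_write_oneword() programs only bytes 0x0e..0x0f (the padding
 * bytes stay 0xff and leave the flash untouched); the loop then resumes
 * bus-aligned at offset 0x10 with 5 bytes remaining.
 */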
static int __xipram
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == MANUFACTURER_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
	return ret ? : retlen;
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
					   struct otp_info *buf, size_t len)
{
	size_t retlen;
	int ret;

	ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
	return ret ? : retlen;
}

#endif
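/*
 * Illustrative sketch (editor's addition): enumerating and then reading
 * the user OTP area through the hooks above, which cfi_intelext_setup()
 * installs in mtd_info. Names ("my_mtd", "info", "buf") are hypothetical.
 */
#if 0
	struct otp_info info[4];
	size_t retlen;
	int n;

	/* with a NULL action, the walk fills buf with otp_info records */
	n = my_mtd->get_user_prot_info(my_mtd, info, sizeof(info));
	if (n > 0)
		my_mtd->read_user_prot_reg(my_mtd, info[0].start,
					   info[0].length, &retlen, buf);
#endif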
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++){
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			if (!test_bit(block, region->lockmap))
				cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
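/*
 * Editor's note: the save/restore pair above only runs when the device is
 * flagged as powering up with all blocks locked. Elsewhere in this driver
 * a fixup marks such parts, in essence (sketch only; the exact placement
 * differs):
 */
#if 0
	mtd->flags |= MTD_POWERUP_LOCK;	/* lock state saved on suspend,
					   selectively unlocked on resume */
#endif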
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
		}
		spin_unlock(chip->mutex);
	}

	return 0;
}
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
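/*
 * Illustrative sketch (editor's addition): the notifier above only fires
 * once hooked into the reboot chain, which this driver does at setup
 * time, roughly as follows:
 */
#if 0
	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
	register_reboot_notifier(&mtd->reboot_notifier);
#endif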
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (region->lockmap)
			kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");