/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.164 2004/11/16 18:29:00 dwmw2 Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}
static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
	}
}
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device ids are as
	 * well.  This table picks all the cases where we know
	 * that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
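
/*
 * Additional chip-specific workarounds can be hooked in by adding an
 * entry before the terminator, e.g. (hypothetical device id and fixup
 * function, shown for illustration only):
 *
 *	{ MANUFACTURER_INTEL, 0x0016, fixup_my_quirk, NULL },
 *
 * cfi_fixup() runs every entry whose manufacturer/device pair matches
 * the probed chip; CFI_MFR_ANY/CFI_ID_ANY act as wildcards.
 */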
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) * (4 + 6);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
				       "%s: cfi_pri_intelext is too fat\n",
				       __FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
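
/*
 * Note on the per-chip timings initialised in cfi_cmdset_0001() above:
 * the CFI query block stores typical operation times as log2 values,
 * so 1<<WordWriteTimeoutTyp is the typical word write time in
 * microseconds and 1<<BlockEraseTimeoutTyp the typical block erase
 * time in milliseconds.  E.g. a value of 4 means a 16 us typical word
 * write.
 */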
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
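
	/*
	 * Decoding note for the loop above: each 32-bit EraseRegionInfo
	 * entry packs two CFI fields: bits 0-15 hold (number of blocks
	 * in the region - 1) and bits 16-31 hold (block size / 256).
	 * E.g. 0x0020003f describes 64 blocks of 0x20 * 256 = 8 KiB
	 * each (per chip, before interleave scaling).
	 */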
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if (mtd) {
		if (mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point.  This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) * (4 + 6);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
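
/*
 * A minimal sketch of how every I/O path in this file drives the two
 * helpers above (see do_write_oneword() etc. for the real thing):
 *
 *	spin_lock(chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_WRITING);
 *	if (ret) {
 *		spin_unlock(chip->mutex);
 *		return ret;
 *	}
 *	... issue commands, poll status, possibly dropping the lock ...
 *	put_chip(map, chip, adr);
 *	spin_unlock(chip->mutex);
 */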
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	preempt_disable();
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
	preempt_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			preempt_enable();
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			preempt_disable();
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				preempt_enable();
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				preempt_disable();
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
/*
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
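
/*
 * Example of the chipnum/ofs split used above and in the read/write
 * paths below: with two 16 MiB chips (cfi->chipshift == 24),
 * from == 0x1800000 gives chipnum == 1 and ofs == 0x800000, i.e. the
 * access starts 8 MiB into the second chip; thislen then clips the
 * transfer at that chip's end so the loop continues with ofs == 0.
 */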
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);

	return 0;
}
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
						loff_t from, size_t len,
						size_t *retlen,
						u_char *buf,
						int base_offst, int reg_sz)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int count = len;
	int chip_num, offst;
	int ret;

	chip_num = ((unsigned int)from/reg_sz);
	offst = from - (reg_sz*chip_num)+base_offst;

	while (count) {
		/* Calculate which chip & protection register offset we need */

		if (chip_num >= cfi->numchips)
			break;

		chip = &cfi->chips[chip_num];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
		if (ret) {
			spin_unlock(chip->mutex);
			return (len-count)?:ret;
		}

		xip_disable(map, chip, chip->start);

		if (chip->state != FL_JEDEC_QUERY) {
			map_write(map, CMD(0x90), chip->start);
			chip->state = FL_JEDEC_QUERY;
		}

		while (count && ((offst-base_offst) < reg_sz)) {
			*buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
			buf++;
			offst++;
			count--;
		}

		xip_enable(map, chip, chip->start);
		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		/* Move on to the next chip */
		chip_num++;
		offst = base_offst;
	}

	*retlen = len - count;
	return 0;
}
static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int base_offst, reg_sz;

	/* Check that we actually have some protection registers */
	if (!extp || !(extp->FeatureSupport&64)) {
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst = (1<<extp->FactProtRegSize);
	reg_sz = (1<<extp->UserProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int base_offst, reg_sz;

	/* Check that we actually have some protection registers */
	if (!extp || !(extp->FeatureSupport&64)) {
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst = 0;
	reg_sz = (1<<extp->FactProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
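
/*
 * As the two wrappers above show, the factory protection register
 * occupies the first (1 << FactProtRegSize) bytes of the protection
 * register space and the user register the following
 * (1 << UserProtRegSize) bytes, which is why the user variant passes
 * the factory size as base_offst.
 */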
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, CMD(0x40), adr);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
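
/*
 * Status register bits tested throughout this file, following the
 * Intel command-set convention: SR.7 (0x80) ready, SR.5 (0x20) erase
 * error, SR.4 (0x10) program error, SR.3 (0x08) Vpp low, SR.1 (0x02)
 * block locked.  CMD() replicates the value across the bus width so
 * all interleaved chips are checked with a single read.
 */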
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr);
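
	/*
	 * The count programmed above is (bus words to follow - 1), per the
	 * Write to Buffer command.  "words - !bytes" accounts for a partial
	 * trailing word: e.g. len == 10 on a 4-byte bus gives words == 2 and
	 * bytes == 2, so three bus words follow and 2 is programmed; with
	 * len == 8, bytes == 0 and words - 1 == 1 is programmed.
	 */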
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		z++;
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}
do_erase_oneblock(struct map_info
*map
, struct flchip
*chip
,
1688 unsigned long adr
, int len
, void *thunk
)
1690 struct cfi_private
*cfi
= map
->fldrv_priv
;
1691 map_word status
, status_OK
;
1692 unsigned long timeo
;
1694 DECLARE_WAITQUEUE(wait
, current
);
1699 /* Let's determine this according to the interleave only once */
1700 status_OK
= CMD(0x80);
1703 spin_lock(chip
->mutex
);
1704 ret
= get_chip(map
, chip
, adr
, FL_ERASING
);
1706 spin_unlock(chip
->mutex
);
1710 XIP_INVAL_CACHED_RANGE(map
, adr
, len
);
1712 xip_disable(map
, chip
, adr
);
1714 /* Clear the status register first */
1715 map_write(map
, CMD(0x50), adr
);
1718 map_write(map
, CMD(0x20), adr
);
1719 map_write(map
, CMD(0xD0), adr
);
1720 chip
->state
= FL_ERASING
;
1721 chip
->erase_suspended
= 0;
1723 spin_unlock(chip
->mutex
);
1724 INVALIDATE_CACHED_RANGE(map
, adr
, len
);
1725 UDELAY(map
, chip
, adr
, chip
->erase_time
*1000/2);
1726 spin_lock(chip
->mutex
);
1728 /* FIXME. Use a timer to check this, and return immediately. */
1729 /* Once the state machine's known to be working I'll do that */
1731 timeo
= jiffies
+ (HZ
*20);
1733 if (chip
->state
!= FL_ERASING
) {
1734 /* Someone's suspended the erase. Sleep */
1735 set_current_state(TASK_UNINTERRUPTIBLE
);
1736 add_wait_queue(&chip
->wq
, &wait
);
1737 spin_unlock(chip
->mutex
);
1739 remove_wait_queue(&chip
->wq
, &wait
);
1740 spin_lock(chip
->mutex
);
1743 if (chip
->erase_suspended
) {
1744 /* This erase was suspended and resumed.
1745 Adjust the timeout */
1746 timeo
= jiffies
+ (HZ
*20); /* FIXME */
1747 chip
->erase_suspended
= 0;
1750 status
= map_read(map
, adr
);
1751 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1754 /* OK Still waiting */
1755 if (time_after(jiffies
, timeo
)) {
1757 map_write(map
, CMD(0x70), adr
);
1758 chip
->state
= FL_STATUS
;
1759 Xstatus
= map_read(map
, adr
);
1760 /* Clear status bits */
1761 map_write(map
, CMD(0x50), adr
);
1762 map_write(map
, CMD(0x70), adr
);
1763 xip_enable(map
, chip
, adr
);
1764 printk(KERN_ERR
"waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
1765 adr
, status
.x
[0], Xstatus
.x
[0]);
1770 /* Latency issues. Drop the lock, wait a while and retry */
1771 spin_unlock(chip
->mutex
);
1772 UDELAY(map
, chip
, adr
, 1000000/HZ
);
1773 spin_lock(chip
->mutex
);
1776 /* We've broken this before. It doesn't hurt to be safe */
1777 map_write(map
, CMD(0x70), adr
);
1778 chip
->state
= FL_STATUS
;
1779 status
= map_read(map
, adr
);
1781 /* check for lock bit */
1782 if (map_word_bitsset(map
, status
, CMD(0x3a))) {
1783 unsigned char chipstatus
;
1785 /* Reset the error bits */
1786 map_write(map
, CMD(0x50), adr
);
1787 map_write(map
, CMD(0x70), adr
);
1788 xip_enable(map
, chip
, adr
);
1790 chipstatus
= status
.x
[0];
1791 if (!map_word_equal(map
, status
, CMD(chipstatus
))) {
1793 for (w
=0; w
<map_words(map
); w
++) {
1794 for (i
= 0; i
<cfi_interleave(cfi
); i
++) {
1795 chipstatus
|= status
.x
[w
] >> (cfi
->device_type
* 8);
1798 printk(KERN_WARNING
"Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1799 status
.x
[0], chipstatus
);
1802 if ((chipstatus
& 0x30) == 0x30) {
1803 printk(KERN_NOTICE
"Chip reports improper command sequence: status 0x%x\n", chipstatus
);
1805 } else if (chipstatus
& 0x02) {
1806 /* Protection bit set */
1808 } else if (chipstatus
& 0x8) {
1810 printk(KERN_WARNING
"Chip reports voltage low on erase: status 0x%x\n", chipstatus
);
1812 } else if (chipstatus
& 0x20) {
1814 printk(KERN_DEBUG
"Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr
, chipstatus
);
1815 timeo
= jiffies
+ HZ
;
1816 put_chip(map
, chip
, adr
);
1817 spin_unlock(chip
->mutex
);
1820 printk(KERN_DEBUG
"Chip erase failed at 0x%08lx: status 0x%x\n", adr
, chipstatus
);
1824 xip_enable(map
, chip
, adr
);
1828 out
: put_chip(map
, chip
, adr
);
1829 spin_unlock(chip
->mutex
);
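
/*
 * Note on the error decoding in do_erase_oneblock() above: the 0x3a
 * mask gathers SR.5 (erase error), SR.4 (program error), SR.3 (Vpp
 * low) and SR.1 (block locked).  SR.4 and SR.5 set together (0x30)
 * indicate an improper command sequence, hence the separate tests for
 * 0x30, 0x02, 0x08 and 0x20.
 */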
int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	spin_unlock(chip->mutex);
	UDELAY(map, chip, adr, 1000000/HZ);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}
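
/*
 * Command set 0x0003 (Intel Standard) is handled by the same code, so
 * both inter-module names are registered to cfi_cmdset_0001 above.
 */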
static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");