2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.154 2004/08/09 13:19:43 dwmw2 Exp $
10 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and
12 * independent of the flash geometry (buswidth, interleave, etc.)
13 * - scalability vs code size is completely set at compile-time
14 * (see include/linux/mtd/cfi.h for selection)
15 * - optimized write buffer method
16 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17 * - reworked lock/unlock/erase support for var size flash
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
26 #include <asm/byteorder.h>
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/map.h>
33 #include <linux/mtd/mtd.h>
34 #include <linux/mtd/compatmac.h>
35 #include <linux/mtd/cfi.h>
37 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 // debugging, turns off buffer write mode if set to 1
40 #define FORCE_WORD_WRITE 0
42 static int cfi_intelext_read (struct mtd_info
*, loff_t
, size_t, size_t *, u_char
*);
43 //static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
44 //static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
45 static int cfi_intelext_write_words(struct mtd_info
*, loff_t
, size_t, size_t *, const u_char
*);
46 static int cfi_intelext_write_buffers(struct mtd_info
*, loff_t
, size_t, size_t *, const u_char
*);
47 static int cfi_intelext_erase_varsize(struct mtd_info
*, struct erase_info
*);
48 static void cfi_intelext_sync (struct mtd_info
*);
49 static int cfi_intelext_lock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
);
50 static int cfi_intelext_unlock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
);
51 static int cfi_intelext_suspend (struct mtd_info
*);
52 static void cfi_intelext_resume (struct mtd_info
*);
54 static void cfi_intelext_destroy(struct mtd_info
*);
56 struct mtd_info
*cfi_cmdset_0001(struct map_info
*, int);
58 static struct mtd_info
*cfi_intelext_setup (struct map_info
*);
59 static int cfi_intelext_partition_fixup(struct map_info
*, struct cfi_private
**);
61 static int cfi_intelext_point (struct mtd_info
*mtd
, loff_t from
, size_t len
,
62 size_t *retlen
, u_char
**mtdbuf
);
63 static void cfi_intelext_unpoint (struct mtd_info
*mtd
, u_char
*addr
, loff_t from
,
68 * *********** SETUP AND PROBE BITS ***********
71 static struct mtd_chip_driver cfi_intelext_chipdrv
= {
72 .probe
= NULL
, /* Not usable directly */
73 .destroy
= cfi_intelext_destroy
,
74 .name
= "cfi_cmdset_0001",
78 /* #define DEBUG_LOCK_BITS */
79 /* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
/*
 * Debug helper: dump every capability field of the Intel extended
 * query table (feature support, suspend support, block status register
 * mask, and the optimum Vcc/Vpp programming voltages) to the console.
 */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int bit;

	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	/* Bits 10..31 have no defined meaning; report any that are set. */
	for (bit = 10; bit < 32; bit++) {
		if (extp->FeatureSupport & (1<<bit))
			printk(" - Unknown Bit %X: supported\n", bit);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	/* Only bit 0 is defined for SuspendCmdSupport. */
	for (bit = 1; bit < 8; bit++) {
		if (extp->SuspendCmdSupport & (1<<bit))
			printk(" - Unknown Bit %X: supported\n", bit);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (bit = 2; bit < 16; bit++) {
		if (extp->BlkStatusRegMask & (1<<bit))
			printk(" - Unknown Bit %X Active: yes\n", bit);
	}

	/* Voltages are BCD-ish: high nibble = volts, low nibble = tenths. */
	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
/*
 * Chip fixup: forcibly clear the "program allowed during erase suspend"
 * capability bit so the driver never attempts erase-suspend-on-write on
 * affected parts.
 */
static void fixup_intel_strataflash(struct map_info *map, void* param)
{
	struct cfi_private *cfi = map->fldrv_priv;
	/*
	 * BUG FIX: cmdset_priv is installed by cfi_cmdset_0001() as a
	 * struct cfi_pri_intelext *, and SuspendCmdSupport is a field of
	 * the *Intel* extended query table — the previous declaration as
	 * struct cfi_pri_amdstd was the wrong (AMD) structure type.
	 */
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;	/* bit 0 = program-while-erase-suspended */
}
#endif
137 static void fixup_st_m28w320ct(struct map_info
*map
, void* param
)
139 struct cfi_private
*cfi
= map
->fldrv_priv
;
141 cfi
->cfiq
->BufWriteTimeoutTyp
= 0; /* Not supported */
142 cfi
->cfiq
->BufWriteTimeoutMax
= 0; /* Not supported */
145 static void fixup_st_m28w320cb(struct map_info
*map
, void* param
)
147 struct cfi_private
*cfi
= map
->fldrv_priv
;
149 /* Note this is done after the region info is endian swapped */
150 cfi
->cfiq
->EraseRegionInfo
[1] =
151 (cfi
->cfiq
->EraseRegionInfo
[1] & 0xffff0000) | 0x3e;
154 static struct cfi_fixup fixup_table
[] = {
155 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
157 CFI_MFR_ANY
, CFI_ID_ANY
,
158 fixup_intel_strataflash
, NULL
162 0x0020, /* STMicroelectronics */
163 0x00ba, /* M28W320CT */
164 fixup_st_m28w320ct
, NULL
166 0x0020, /* STMicroelectronics */
167 0x00bb, /* M28W320CB */
168 fixup_st_m28w320cb
, NULL
174 /* This routine is made available to other mtd code via
175 * inter_module_register. It must only be accessed through
176 * inter_module_get which will bump the use count of this module. The
177 * addresses passed back in cfi are valid as long as the use count of
178 * this module is non-zero, i.e. between inter_module_get and
179 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
181 struct mtd_info
*cfi_cmdset_0001(struct map_info
*map
, int primary
)
183 struct cfi_private
*cfi
= map
->fldrv_priv
;
186 if (cfi
->cfi_mode
== CFI_MODE_CFI
) {
188 * It's a real CFI chip, not one for which the probe
189 * routine faked a CFI structure. So we read the feature
192 __u16 adr
= primary
?cfi
->cfiq
->P_ADR
:cfi
->cfiq
->A_ADR
;
193 struct cfi_pri_intelext
*extp
;
195 extp
= (struct cfi_pri_intelext
*)cfi_read_pri(map
, adr
, sizeof(*extp
), "Intel/Sharp");
199 /* Do some byteswapping if necessary */
200 extp
->FeatureSupport
= le32_to_cpu(extp
->FeatureSupport
);
201 extp
->BlkStatusRegMask
= le16_to_cpu(extp
->BlkStatusRegMask
);
202 extp
->ProtRegAddr
= le16_to_cpu(extp
->ProtRegAddr
);
204 /* Install our own private info structure */
205 cfi
->cmdset_priv
= extp
;
207 cfi_fixup(map
, fixup_table
);
209 #ifdef DEBUG_CFI_FEATURES
210 /* Tell the user about it in lots of lovely detail */
211 cfi_tell_features(extp
);
214 if(extp
->SuspendCmdSupport
& 1) {
215 printk(KERN_NOTICE
"cfi_cmdset_0001: Erase suspend on write enabled\n");
219 for (i
=0; i
< cfi
->numchips
; i
++) {
220 cfi
->chips
[i
].word_write_time
= 1<<cfi
->cfiq
->WordWriteTimeoutTyp
;
221 cfi
->chips
[i
].buffer_write_time
= 1<<cfi
->cfiq
->BufWriteTimeoutTyp
;
222 cfi
->chips
[i
].erase_time
= 1<<cfi
->cfiq
->BlockEraseTimeoutTyp
;
223 cfi
->chips
[i
].ref_point_counter
= 0;
226 map
->fldrv
= &cfi_intelext_chipdrv
;
228 return cfi_intelext_setup(map
);
231 static struct mtd_info
*cfi_intelext_setup(struct map_info
*map
)
233 struct cfi_private
*cfi
= map
->fldrv_priv
;
234 struct mtd_info
*mtd
;
235 unsigned long offset
= 0;
237 unsigned long devsize
= (1<<cfi
->cfiq
->DevSize
) * cfi
->interleave
;
239 mtd
= kmalloc(sizeof(*mtd
), GFP_KERNEL
);
240 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
243 printk(KERN_ERR
"Failed to allocate memory for MTD device\n");
247 memset(mtd
, 0, sizeof(*mtd
));
249 mtd
->type
= MTD_NORFLASH
;
250 mtd
->size
= devsize
* cfi
->numchips
;
252 mtd
->numeraseregions
= cfi
->cfiq
->NumEraseRegions
* cfi
->numchips
;
253 mtd
->eraseregions
= kmalloc(sizeof(struct mtd_erase_region_info
)
254 * mtd
->numeraseregions
, GFP_KERNEL
);
255 if (!mtd
->eraseregions
) {
256 printk(KERN_ERR
"Failed to allocate memory for MTD erase region info\n");
260 for (i
=0; i
<cfi
->cfiq
->NumEraseRegions
; i
++) {
261 unsigned long ernum
, ersize
;
262 ersize
= ((cfi
->cfiq
->EraseRegionInfo
[i
] >> 8) & ~0xff) * cfi
->interleave
;
263 ernum
= (cfi
->cfiq
->EraseRegionInfo
[i
] & 0xffff) + 1;
265 if (mtd
->erasesize
< ersize
) {
266 mtd
->erasesize
= ersize
;
268 for (j
=0; j
<cfi
->numchips
; j
++) {
269 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].offset
= (j
*devsize
)+offset
;
270 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].erasesize
= ersize
;
271 mtd
->eraseregions
[(j
*cfi
->cfiq
->NumEraseRegions
)+i
].numblocks
= ernum
;
273 offset
+= (ersize
* ernum
);
276 if (offset
!= devsize
) {
278 printk(KERN_WARNING
"Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset
, devsize
);
282 for (i
=0; i
<mtd
->numeraseregions
;i
++){
283 printk(KERN_DEBUG
"%d: offset=0x%x,size=0x%x,blocks=%d\n",
284 i
,mtd
->eraseregions
[i
].offset
,
285 mtd
->eraseregions
[i
].erasesize
,
286 mtd
->eraseregions
[i
].numblocks
);
289 /* Also select the correct geometry setup too */
290 mtd
->erase
= cfi_intelext_erase_varsize
;
291 mtd
->read
= cfi_intelext_read
;
293 if (map_is_linear(map
)) {
294 mtd
->point
= cfi_intelext_point
;
295 mtd
->unpoint
= cfi_intelext_unpoint
;
298 if ( cfi
->cfiq
->BufWriteTimeoutTyp
&& !FORCE_WORD_WRITE
) {
299 printk(KERN_INFO
"Using buffer write method\n" );
300 mtd
->write
= cfi_intelext_write_buffers
;
302 printk(KERN_INFO
"Using word write method\n" );
303 mtd
->write
= cfi_intelext_write_words
;
306 mtd
->read_user_prot_reg
= cfi_intelext_read_user_prot_reg
;
307 mtd
->read_fact_prot_reg
= cfi_intelext_read_fact_prot_reg
;
309 mtd
->sync
= cfi_intelext_sync
;
310 mtd
->lock
= cfi_intelext_lock
;
311 mtd
->unlock
= cfi_intelext_unlock
;
312 mtd
->suspend
= cfi_intelext_suspend
;
313 mtd
->resume
= cfi_intelext_resume
;
314 mtd
->flags
= MTD_CAP_NORFLASH
;
315 map
->fldrv
= &cfi_intelext_chipdrv
;
316 mtd
->name
= map
->name
;
318 /* This function has the potential to distort the reality
319 a bit and therefore should be called last. */
320 if (cfi_intelext_partition_fixup(map
, &cfi
) != 0)
323 __module_get(THIS_MODULE
);
328 if(mtd
->eraseregions
)
329 kfree(mtd
->eraseregions
);
332 kfree(cfi
->cmdset_priv
);
336 static int cfi_intelext_partition_fixup(struct map_info
*map
,
337 struct cfi_private
**pcfi
)
339 struct cfi_private
*cfi
= *pcfi
;
340 struct cfi_pri_intelext
*extp
= cfi
->cmdset_priv
;
343 * Probing of multi-partition flash ships.
345 * This is extremely crude at the moment and should probably be
346 * extracted entirely from the Intel extended query data instead.
347 * Right now a L18 flash is assumed if multiple operations is
350 * To support multiple partitions when available, we simply arrange
351 * for each of them to have their own flchip structure even if they
352 * are on the same physical chip. This means completely recreating
353 * a new cfi_private structure right here which is a blatent code
354 * layering violation, but this is still the least intrusive
355 * arrangement at this point. This can be rearranged in the future
356 * if someone feels motivated enough. --nico
358 if (extp
&& extp
->FeatureSupport
& (1 << 9)) {
359 struct cfi_private
*newcfi
;
361 struct flchip_shared
*shared
;
362 int numparts
, partshift
, numvirtchips
, i
, j
;
365 * The L18 flash memory array is divided
366 * into multiple 8-Mbit partitions.
368 numparts
= 1 << (cfi
->cfiq
->DevSize
- 20);
369 partshift
= 20 + __ffs(cfi
->interleave
);
370 numvirtchips
= cfi
->numchips
* numparts
;
372 newcfi
= kmalloc(sizeof(struct cfi_private
) + numvirtchips
* sizeof(struct flchip
), GFP_KERNEL
);
375 shared
= kmalloc(sizeof(struct flchip_shared
) * cfi
->numchips
, GFP_KERNEL
);
380 memcpy(newcfi
, cfi
, sizeof(struct cfi_private
));
381 newcfi
->numchips
= numvirtchips
;
382 newcfi
->chipshift
= partshift
;
384 chip
= &newcfi
->chips
[0];
385 for (i
= 0; i
< cfi
->numchips
; i
++) {
386 shared
[i
].writing
= shared
[i
].erasing
= NULL
;
387 spin_lock_init(&shared
[i
].lock
);
388 for (j
= 0; j
< numparts
; j
++) {
389 *chip
= cfi
->chips
[i
];
390 chip
->start
+= j
<< partshift
;
391 chip
->priv
= &shared
[i
];
392 /* those should be reset too since
393 they create memory references. */
394 init_waitqueue_head(&chip
->wq
);
395 spin_lock_init(&chip
->_spinlock
);
396 chip
->mutex
= &chip
->_spinlock
;
401 printk(KERN_DEBUG
"%s: %d sets of %d interleaved chips "
402 "--> %d partitions of %#x bytes\n",
403 map
->name
, cfi
->numchips
, cfi
->interleave
,
404 newcfi
->numchips
, 1<<newcfi
->chipshift
);
406 map
->fldrv_priv
= newcfi
;
415 * *********** CHIP ACCESS FUNCTIONS ***********
418 static int get_chip(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
, int mode
)
420 DECLARE_WAITQUEUE(wait
, current
);
421 struct cfi_private
*cfi
= map
->fldrv_priv
;
422 map_word status
, status_OK
= CMD(0x80), status_PWS
= CMD(0x01);
424 struct cfi_pri_intelext
*cfip
= cfi
->cmdset_priv
;
427 timeo
= jiffies
+ HZ
;
429 if (chip
->priv
&& (mode
== FL_WRITING
|| mode
== FL_ERASING
)) {
431 * OK. We have possibility for contension on the write/erase
432 * operations which are global to the real chip and not per
433 * partition. So let's fight it over in the partition which
434 * currently has authority on the operation.
436 * The rules are as follows:
438 * - any write operation must own shared->writing.
440 * - any erase operation must own _both_ shared->writing and
443 * - contension arbitration is handled in the owner's context.
445 * The 'shared' struct can be read when its lock is taken.
446 * However any writes to it can only be made when the current
447 * owner's lock is also held.
449 struct flchip_shared
*shared
= chip
->priv
;
450 struct flchip
*contender
;
451 spin_lock(&shared
->lock
);
452 contender
= shared
->writing
;
453 if (contender
&& contender
!= chip
) {
455 * The engine to perform desired operation on this
456 * partition is already in use by someone else.
457 * Let's fight over it in the context of the chip
458 * currently using it. If it is possible to suspend,
459 * that other partition will do just that, otherwise
460 * it'll happily send us to sleep. In any case, when
461 * get_chip returns success we're clear to go ahead.
463 int ret
= spin_trylock(contender
->mutex
);
464 spin_unlock(&shared
->lock
);
467 spin_unlock(chip
->mutex
);
468 ret
= get_chip(map
, contender
, contender
->start
, mode
);
469 spin_lock(chip
->mutex
);
471 spin_unlock(contender
->mutex
);
474 timeo
= jiffies
+ HZ
;
475 spin_lock(&shared
->lock
);
479 shared
->writing
= chip
;
480 if (mode
== FL_ERASING
)
481 shared
->erasing
= chip
;
482 if (contender
&& contender
!= chip
)
483 spin_unlock(contender
->mutex
);
484 spin_unlock(&shared
->lock
);
487 switch (chip
->state
) {
491 status
= map_read(map
, adr
);
492 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
495 /* At this point we're fine with write operations
496 in other partitions as they don't conflict. */
497 if (chip
->priv
&& map_word_andequal(map
, status
, status_PWS
, status_PWS
))
500 if (time_after(jiffies
, timeo
)) {
501 printk(KERN_ERR
"Waiting for chip to be ready timed out. Status %lx\n",
505 spin_unlock(chip
->mutex
);
507 spin_lock(chip
->mutex
);
508 /* Someone else might have been playing with it. */
518 if (!(cfip
->FeatureSupport
& 2) ||
519 !(mode
== FL_READY
|| mode
== FL_POINT
||
520 (mode
== FL_WRITING
&& (cfip
->SuspendCmdSupport
& 1))))
525 map_write(map
, CMD(0xB0), adr
);
527 /* If the flash has finished erasing, then 'erase suspend'
528 * appears to make some (28F320) flash devices switch to
529 * 'read' mode. Make sure that we switch to 'read status'
530 * mode so we get the right data. --rmk
532 map_write(map
, CMD(0x70), adr
);
533 chip
->oldstate
= FL_ERASING
;
534 chip
->state
= FL_ERASE_SUSPENDING
;
535 chip
->erase_suspended
= 1;
537 status
= map_read(map
, adr
);
538 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
541 if (time_after(jiffies
, timeo
)) {
542 /* Urgh. Resume and pretend we weren't here. */
543 map_write(map
, CMD(0xd0), adr
);
544 /* Make sure we're in 'read status' mode if it had finished */
545 map_write(map
, CMD(0x70), adr
);
546 chip
->state
= FL_ERASING
;
547 chip
->oldstate
= FL_READY
;
548 printk(KERN_ERR
"Chip not ready after erase "
549 "suspended: status = 0x%lx\n", status
.x
[0]);
553 spin_unlock(chip
->mutex
);
555 spin_lock(chip
->mutex
);
556 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
557 So we can just loop here. */
559 chip
->state
= FL_STATUS
;
563 /* Only if there's no operation suspended... */
564 if (mode
== FL_READY
&& chip
->oldstate
== FL_READY
)
569 #if 1 // mask by Victor Yu. 05-14-2007
570 set_current_state(TASK_UNINTERRUPTIBLE
);
572 add_wait_queue(&chip
->wq
, &wait
);
573 spin_unlock(chip
->mutex
);
575 remove_wait_queue(&chip
->wq
, &wait
);
576 spin_lock(chip
->mutex
);
581 static void put_chip(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
)
583 struct cfi_private
*cfi
= map
->fldrv_priv
;
586 struct flchip_shared
*shared
= chip
->priv
;
587 spin_lock(&shared
->lock
);
588 if (shared
->writing
== chip
) {
589 /* We own the ability to write, but we're done */
590 shared
->writing
= shared
->erasing
;
591 if (shared
->writing
&& shared
->writing
!= chip
) {
592 /* give back ownership to who we loaned it from */
593 struct flchip
*loaner
= shared
->writing
;
594 spin_lock(loaner
->mutex
);
595 spin_unlock(&shared
->lock
);
596 spin_unlock(chip
->mutex
);
597 put_chip(map
, loaner
, loaner
->start
);
598 spin_lock(chip
->mutex
);
599 spin_unlock(loaner
->mutex
);
601 if (chip
->oldstate
!= FL_ERASING
) {
602 shared
->erasing
= NULL
;
603 if (chip
->oldstate
!= FL_WRITING
)
604 shared
->writing
= NULL
;
606 spin_unlock(&shared
->lock
);
609 spin_unlock(&shared
->lock
);
613 switch(chip
->oldstate
) {
615 chip
->state
= chip
->oldstate
;
616 /* What if one interleaved chip has finished and the
617 other hasn't? The old code would leave the finished
618 one in READY mode. That's bad, and caused -EROFS
619 errors to be returned from do_erase_oneblock because
620 that's the only bit it checked for at the time.
621 As the state machine appears to explicitly allow
622 sending the 0x70 (Read Status) command to an erasing
623 chip and expecting it to be ignored, that's what we
625 map_write(map
, CMD(0xd0), adr
);
626 map_write(map
, CMD(0x70), adr
);
627 chip
->oldstate
= FL_READY
;
628 chip
->state
= FL_ERASING
;
634 /* We should really make set_vpp() count, rather than doing this */
638 printk(KERN_ERR
"put_chip() called with oldstate %d!!\n", chip
->oldstate
);
643 static int do_point_onechip (struct map_info
*map
, struct flchip
*chip
, loff_t adr
, size_t len
)
645 unsigned long cmd_addr
;
646 struct cfi_private
*cfi
= map
->fldrv_priv
;
651 /* Ensure cmd read/writes are aligned. */
652 cmd_addr
= adr
& ~(map_bankwidth(map
)-1);
654 spin_lock(chip
->mutex
);
656 ret
= get_chip(map
, chip
, cmd_addr
, FL_POINT
);
659 if (chip
->state
!= FL_POINT
&& chip
->state
!= FL_READY
)
660 map_write(map
, CMD(0xff), cmd_addr
);
662 chip
->state
= FL_POINT
;
663 chip
->ref_point_counter
++;
665 spin_unlock(chip
->mutex
);
670 static int cfi_intelext_point (struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
**mtdbuf
)
672 struct map_info
*map
= mtd
->priv
;
673 struct cfi_private
*cfi
= map
->fldrv_priv
;
678 if (!map
->virt
|| (from
+ len
> mtd
->size
))
681 *mtdbuf
= (void *)map
->virt
+ from
;
684 /* Now lock the chip(s) to POINT state */
686 /* ofs: offset within the first chip that the first read should start */
687 chipnum
= (from
>> cfi
->chipshift
);
688 ofs
= from
- (chipnum
<< cfi
->chipshift
);
691 unsigned long thislen
;
693 if (chipnum
>= cfi
->numchips
)
696 if ((len
+ ofs
-1) >> cfi
->chipshift
)
697 thislen
= (1<<cfi
->chipshift
) - ofs
;
701 ret
= do_point_onechip(map
, &cfi
->chips
[chipnum
], ofs
, thislen
);
714 static void cfi_intelext_unpoint (struct mtd_info
*mtd
, u_char
*addr
, loff_t from
, size_t len
)
716 struct map_info
*map
= mtd
->priv
;
717 struct cfi_private
*cfi
= map
->fldrv_priv
;
721 /* Now unlock the chip(s) POINT state */
723 /* ofs: offset within the first chip that the first read should start */
724 chipnum
= (from
>> cfi
->chipshift
);
725 ofs
= from
- (chipnum
<< cfi
->chipshift
);
728 unsigned long thislen
;
731 chip
= &cfi
->chips
[chipnum
];
732 if (chipnum
>= cfi
->numchips
)
735 if ((len
+ ofs
-1) >> cfi
->chipshift
)
736 thislen
= (1<<cfi
->chipshift
) - ofs
;
740 spin_lock(chip
->mutex
);
741 if (chip
->state
== FL_POINT
) {
742 chip
->ref_point_counter
--;
743 if(chip
->ref_point_counter
== 0)
744 chip
->state
= FL_READY
;
746 printk(KERN_ERR
"Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
748 put_chip(map
, chip
, chip
->start
);
749 spin_unlock(chip
->mutex
);
757 static inline int do_read_onechip(struct map_info
*map
, struct flchip
*chip
, loff_t adr
, size_t len
, u_char
*buf
)
759 unsigned long cmd_addr
;
760 struct cfi_private
*cfi
= map
->fldrv_priv
;
765 /* Ensure cmd read/writes are aligned. */
766 cmd_addr
= adr
& ~(map_bankwidth(map
)-1);
768 spin_lock(chip
->mutex
);
769 ret
= get_chip(map
, chip
, cmd_addr
, FL_READY
);
771 spin_unlock(chip
->mutex
);
775 if (chip
->state
!= FL_POINT
&& chip
->state
!= FL_READY
) {
776 map_write(map
, CMD(0xff), cmd_addr
);
778 chip
->state
= FL_READY
;
781 map_copy_from(map
, buf
, adr
, len
);
783 put_chip(map
, chip
, cmd_addr
);
785 spin_unlock(chip
->mutex
);
789 static int cfi_intelext_read (struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
791 struct map_info
*map
= mtd
->priv
;
792 struct cfi_private
*cfi
= map
->fldrv_priv
;
797 /* ofs: offset within the first chip that the first read should start */
798 chipnum
= (from
>> cfi
->chipshift
);
799 ofs
= from
- (chipnum
<< cfi
->chipshift
);
804 unsigned long thislen
;
806 if (chipnum
>= cfi
->numchips
)
809 if ((len
+ ofs
-1) >> cfi
->chipshift
)
810 thislen
= (1<<cfi
->chipshift
) - ofs
;
814 ret
= do_read_onechip(map
, &cfi
->chips
[chipnum
], ofs
, thislen
, buf
);
828 static int cfi_intelext_read_prot_reg (struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
, int base_offst
, int reg_sz
)
830 struct map_info
*map
= mtd
->priv
;
831 struct cfi_private
*cfi
= map
->fldrv_priv
;
832 struct cfi_pri_intelext
*extp
= cfi
->cmdset_priv
;
834 int ofs_factor
= cfi
->interleave
* cfi
->device_type
;
839 chip_num
= ((unsigned int)from
/reg_sz
);
840 offst
= from
- (reg_sz
*chip_num
)+base_offst
;
843 /* Calculate which chip & protection register offset we need */
845 if (chip_num
>= cfi
->numchips
)
848 chip
= &cfi
->chips
[chip_num
];
850 spin_lock(chip
->mutex
);
851 ret
= get_chip(map
, chip
, chip
->start
, FL_JEDEC_QUERY
);
853 spin_unlock(chip
->mutex
);
854 return (len
-count
)?:ret
;
857 if (chip
->state
!= FL_JEDEC_QUERY
) {
858 map_write(map
, CMD(0x90), chip
->start
);
859 chip
->state
= FL_JEDEC_QUERY
;
862 while (count
&& ((offst
-base_offst
) < reg_sz
)) {
863 *buf
= map_read8(map
,(chip
->start
+((extp
->ProtRegAddr
+1)*ofs_factor
)+offst
));
869 put_chip(map
, chip
, chip
->start
);
870 spin_unlock(chip
->mutex
);
872 /* Move on to the next chip */
881 static int cfi_intelext_read_user_prot_reg (struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
883 struct map_info
*map
= mtd
->priv
;
884 struct cfi_private
*cfi
= map
->fldrv_priv
;
885 struct cfi_pri_intelext
*extp
=cfi
->cmdset_priv
;
886 int base_offst
,reg_sz
;
888 /* Check that we actually have some protection registers */
889 if(!(extp
->FeatureSupport
&64)){
890 printk(KERN_WARNING
"%s: This flash device has no protection data to read!\n",map
->name
);
894 base_offst
=(1<<extp
->FactProtRegSize
);
895 reg_sz
=(1<<extp
->UserProtRegSize
);
897 return cfi_intelext_read_prot_reg(mtd
, from
, len
, retlen
, buf
, base_offst
, reg_sz
);
900 static int cfi_intelext_read_fact_prot_reg (struct mtd_info
*mtd
, loff_t from
, size_t len
, size_t *retlen
, u_char
*buf
)
902 struct map_info
*map
= mtd
->priv
;
903 struct cfi_private
*cfi
= map
->fldrv_priv
;
904 struct cfi_pri_intelext
*extp
=cfi
->cmdset_priv
;
905 int base_offst
,reg_sz
;
907 /* Check that we actually have some protection registers */
908 if(!(extp
->FeatureSupport
&64)){
909 printk(KERN_WARNING
"%s: This flash device has no protection data to read!\n",map
->name
);
914 reg_sz
=(1<<extp
->FactProtRegSize
);
916 return cfi_intelext_read_prot_reg(mtd
, from
, len
, retlen
, buf
, base_offst
, reg_sz
);
920 static int do_write_oneword(struct map_info
*map
, struct flchip
*chip
, unsigned long adr
, map_word datum
)
922 struct cfi_private
*cfi
= map
->fldrv_priv
;
923 map_word status
, status_OK
;
929 /* Let's determine this according to the interleave only once */
930 status_OK
= CMD(0x80);
932 spin_lock(chip
->mutex
);
933 ret
= get_chip(map
, chip
, adr
, FL_WRITING
);
935 spin_unlock(chip
->mutex
);
940 map_write(map
, CMD(0x40), adr
);
941 map_write(map
, datum
, adr
);
942 chip
->state
= FL_WRITING
;
944 spin_unlock(chip
->mutex
);
945 INVALIDATE_CACHED_RANGE(map
, adr
, map_bankwidth(map
));
946 cfi_udelay(chip
->word_write_time
);
947 spin_lock(chip
->mutex
);
949 timeo
= jiffies
+ (HZ
/2);
952 if (chip
->state
!= FL_WRITING
) {
953 /* Someone's suspended the write. Sleep */
954 DECLARE_WAITQUEUE(wait
, current
);
956 #if 1 // mask by Victor Yu. 05-14-2007
957 set_current_state(TASK_UNINTERRUPTIBLE
);
959 add_wait_queue(&chip
->wq
, &wait
);
960 spin_unlock(chip
->mutex
);
962 remove_wait_queue(&chip
->wq
, &wait
);
963 timeo
= jiffies
+ (HZ
/ 2); /* FIXME */
964 spin_lock(chip
->mutex
);
968 status
= map_read(map
, adr
);
969 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
972 /* OK Still waiting */
973 if (time_after(jiffies
, timeo
)) {
974 chip
->state
= FL_STATUS
;
975 printk(KERN_ERR
"waiting for chip to be ready timed out in word write\n");
980 /* Latency issues. Drop the lock, wait a while and retry */
981 spin_unlock(chip
->mutex
);
984 spin_lock(chip
->mutex
);
987 chip
->word_write_time
--;
988 if (!chip
->word_write_time
)
989 chip
->word_write_time
++;
992 chip
->word_write_time
++;
994 /* Done and happy. */
995 chip
->state
= FL_STATUS
;
996 /* check for lock bit */
997 if (map_word_bitsset(map
, status
, CMD(0x02))) {
999 map_write(map
, CMD(0x50), adr
);
1000 /* put back into read status register mode */
1001 map_write(map
, CMD(0x70), adr
);
1005 put_chip(map
, chip
, adr
);
1006 spin_unlock(chip
->mutex
);
1012 static int cfi_intelext_write_words (struct mtd_info
*mtd
, loff_t to
, size_t len
, size_t *retlen
, const u_char
*buf
)
1014 struct map_info
*map
= mtd
->priv
;
1015 struct cfi_private
*cfi
= map
->fldrv_priv
;
1024 chipnum
= to
>> cfi
->chipshift
;
1025 ofs
= to
- (chipnum
<< cfi
->chipshift
);
1027 /* If it's not bus-aligned, do the first byte write */
1028 if (ofs
& (map_bankwidth(map
)-1)) {
1029 unsigned long bus_ofs
= ofs
& ~(map_bankwidth(map
)-1);
1030 int gap
= ofs
- bus_ofs
;
1034 n
= min_t(int, len
, map_bankwidth(map
)-gap
);
1035 datum
= map_word_ff(map
);
1036 datum
= map_word_load_partial(map
, datum
, buf
, gap
, n
);
1038 ret
= do_write_oneword(map
, &cfi
->chips
[chipnum
],
1048 if (ofs
>> cfi
->chipshift
) {
1051 if (chipnum
== cfi
->numchips
)
1056 while(len
>= map_bankwidth(map
)) {
1057 map_word datum
= map_word_load(map
, buf
);
1059 ret
= do_write_oneword(map
, &cfi
->chips
[chipnum
],
1064 ofs
+= map_bankwidth(map
);
1065 buf
+= map_bankwidth(map
);
1066 (*retlen
) += map_bankwidth(map
);
1067 len
-= map_bankwidth(map
);
1069 if (ofs
>> cfi
->chipshift
) {
1072 if (chipnum
== cfi
->numchips
)
1077 if (len
& (map_bankwidth(map
)-1)) {
1080 datum
= map_word_ff(map
);
1081 datum
= map_word_load_partial(map
, datum
, buf
, 0, len
);
1083 ret
= do_write_oneword(map
, &cfi
->chips
[chipnum
],
1095 static inline int do_write_buffer(struct map_info
*map
, struct flchip
*chip
,
1096 unsigned long adr
, const u_char
*buf
, int len
)
1098 struct cfi_private
*cfi
= map
->fldrv_priv
;
1099 map_word status
, status_OK
;
1100 unsigned long cmd_adr
, timeo
;
1101 int wbufsize
, z
, ret
=0, bytes
, words
;
1103 wbufsize
= cfi_interleave(cfi
) << cfi
->cfiq
->MaxBufWriteSize
;
1105 cmd_adr
= adr
& ~(wbufsize
-1);
1107 /* Let's determine this according to the interleave only once */
1108 status_OK
= CMD(0x80);
1110 spin_lock(chip
->mutex
);
1111 ret
= get_chip(map
, chip
, cmd_adr
, FL_WRITING
);
1113 spin_unlock(chip
->mutex
);
1117 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1118 [...], the device will not accept any more Write to Buffer commands".
1119 So we must check here and reset those bits if they're set. Otherwise
1120 we're just pissing in the wind */
1121 if (chip
->state
!= FL_STATUS
)
1122 map_write(map
, CMD(0x70), cmd_adr
);
1123 status
= map_read(map
, cmd_adr
);
1124 if (map_word_bitsset(map
, status
, CMD(0x30))) {
1125 printk(KERN_WARNING
"SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status
.x
[0]);
1126 map_write(map
, CMD(0x50), cmd_adr
);
1127 map_write(map
, CMD(0x70), cmd_adr
);
1131 chip
->state
= FL_WRITING_TO_BUFFER
;
1135 map_write(map
, CMD(0xe8), cmd_adr
);
1137 status
= map_read(map
, cmd_adr
);
1138 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1141 spin_unlock(chip
->mutex
);
1143 spin_lock(chip
->mutex
);
1146 /* Argh. Not ready for write to buffer */
1147 map_write(map
, CMD(0x70), cmd_adr
);
1148 chip
->state
= FL_STATUS
;
1149 printk(KERN_ERR
"Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1150 status
.x
[0], map_read(map
, cmd_adr
).x
[0]);
1151 /* Odd. Clear status bits */
1152 map_write(map
, CMD(0x50), cmd_adr
);
1153 map_write(map
, CMD(0x70), cmd_adr
);
1159 /* Write length of data to come */
1160 bytes
= len
& (map_bankwidth(map
)-1);
1161 words
= len
/ map_bankwidth(map
);
1162 map_write(map
, CMD(words
- !bytes
), cmd_adr
);
1166 while(z
< words
* map_bankwidth(map
)) {
1167 map_word datum
= map_word_load(map
, buf
);
1168 map_write(map
, datum
, adr
+z
);
1170 z
+= map_bankwidth(map
);
1171 buf
+= map_bankwidth(map
);
1177 datum
= map_word_ff(map
);
1178 datum
= map_word_load_partial(map
, datum
, buf
, 0, bytes
);
1179 map_write(map
, datum
, adr
+z
);
1183 map_write(map
, CMD(0xd0), cmd_adr
);
1184 chip
->state
= FL_WRITING
;
1186 spin_unlock(chip
->mutex
);
1187 INVALIDATE_CACHED_RANGE(map
, adr
, len
);
1188 cfi_udelay(chip
->buffer_write_time
);
1189 spin_lock(chip
->mutex
);
1191 timeo
= jiffies
+ (HZ
/2);
1194 if (chip
->state
!= FL_WRITING
) {
1195 /* Someone's suspended the write. Sleep */
1196 DECLARE_WAITQUEUE(wait
, current
);
1197 #if 1 // mask by Victor Yu. 05-14-2007
1198 set_current_state(TASK_UNINTERRUPTIBLE
);
1200 add_wait_queue(&chip
->wq
, &wait
);
1201 spin_unlock(chip
->mutex
);
1203 remove_wait_queue(&chip
->wq
, &wait
);
1204 timeo
= jiffies
+ (HZ
/ 2); /* FIXME */
1205 spin_lock(chip
->mutex
);
1209 status
= map_read(map
, cmd_adr
);
1210 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1213 /* OK Still waiting */
1214 if (time_after(jiffies
, timeo
)) {
1215 chip
->state
= FL_STATUS
;
1216 printk(KERN_ERR
"waiting for chip to be ready timed out in bufwrite\n");
1221 /* Latency issues. Drop the lock, wait a while and retry */
1222 spin_unlock(chip
->mutex
);
1225 spin_lock(chip
->mutex
);
1228 chip
->buffer_write_time
--;
1229 if (!chip
->buffer_write_time
)
1230 chip
->buffer_write_time
++;
1233 chip
->buffer_write_time
++;
1235 /* Done and happy. */
1236 chip
->state
= FL_STATUS
;
1238 /* check for lock bit */
1239 if (map_word_bitsset(map
, status
, CMD(0x02))) {
1241 map_write(map
, CMD(0x50), cmd_adr
);
1242 /* put back into read status register mode */
1243 map_write(map
, CMD(0x70), adr
);
1248 put_chip(map
, chip
, cmd_adr
);
1249 spin_unlock(chip
->mutex
);
1253 static int cfi_intelext_write_buffers (struct mtd_info
*mtd
, loff_t to
,
1254 size_t len
, size_t *retlen
, const u_char
*buf
)
1256 struct map_info
*map
= mtd
->priv
;
1257 struct cfi_private
*cfi
= map
->fldrv_priv
;
1258 int wbufsize
= cfi_interleave(cfi
) << cfi
->cfiq
->MaxBufWriteSize
;
1267 chipnum
= to
>> cfi
->chipshift
;
1268 ofs
= to
- (chipnum
<< cfi
->chipshift
);
1270 /* If it's not bus-aligned, do the first word write */
1271 if (ofs
& (map_bankwidth(map
)-1)) {
1272 size_t local_len
= (-ofs
)&(map_bankwidth(map
)-1);
1273 if (local_len
> len
)
1275 ret
= cfi_intelext_write_words(mtd
, to
, local_len
,
1283 if (ofs
>> cfi
->chipshift
) {
1286 if (chipnum
== cfi
->numchips
)
1292 /* We must not cross write block boundaries */
1293 int size
= wbufsize
- (ofs
& (wbufsize
-1));
1297 ret
= do_write_buffer(map
, &cfi
->chips
[chipnum
],
1307 if (ofs
>> cfi
->chipshift
) {
1310 if (chipnum
== cfi
->numchips
)
1317 typedef int (*varsize_frob_t
)(struct map_info
*map
, struct flchip
*chip
,
1318 unsigned long adr
, int len
, void *thunk
);
1320 static int cfi_intelext_varsize_frob(struct mtd_info
*mtd
, varsize_frob_t frob
,
1321 loff_t ofs
, size_t len
, void *thunk
)
1323 struct map_info
*map
= mtd
->priv
;
1324 struct cfi_private
*cfi
= map
->fldrv_priv
;
1326 int chipnum
, ret
= 0;
1328 struct mtd_erase_region_info
*regions
= mtd
->eraseregions
;
1330 if (ofs
> mtd
->size
)
1333 if ((len
+ ofs
) > mtd
->size
)
1336 /* Check that both start and end of the requested erase are
1337 * aligned with the erasesize at the appropriate addresses.
1342 /* Skip all erase regions which are ended before the start of
1343 the requested erase. Actually, to save on the calculations,
1344 we skip to the first erase region which starts after the
1345 start of the requested erase, and then go back one.
1348 while (i
< mtd
->numeraseregions
&& ofs
>= regions
[i
].offset
)
1352 /* OK, now i is pointing at the erase region in which this
1353 erase request starts. Check the start of the requested
1354 erase range is aligned with the erase size which is in
1358 if (ofs
& (regions
[i
].erasesize
-1))
1361 /* Remember the erase region we start on */
1364 /* Next, check that the end of the requested erase is aligned
1365 * with the erase region at that address.
1368 while (i
<mtd
->numeraseregions
&& (ofs
+ len
) >= regions
[i
].offset
)
1371 /* As before, drop back one to point at the region in which
1372 the address actually falls
1376 if ((ofs
+ len
) & (regions
[i
].erasesize
-1))
1379 chipnum
= ofs
>> cfi
->chipshift
;
1380 adr
= ofs
- (chipnum
<< cfi
->chipshift
);
1385 unsigned long chipmask
;
1386 int size
= regions
[i
].erasesize
;
1388 ret
= (*frob
)(map
, &cfi
->chips
[chipnum
], adr
, size
, thunk
);
1396 chipmask
= (1 << cfi
->chipshift
) - 1;
1397 if ((adr
& chipmask
) == ((regions
[i
].offset
+ size
* regions
[i
].numblocks
) & chipmask
))
1400 if (adr
>> cfi
->chipshift
) {
1404 if (chipnum
>= cfi
->numchips
)
1413 static int do_erase_oneblock(struct map_info
*map
, struct flchip
*chip
,
1414 unsigned long adr
, int len
, void *thunk
)
1416 struct cfi_private
*cfi
= map
->fldrv_priv
;
1417 map_word status
, status_OK
;
1418 unsigned long timeo
;
1420 DECLARE_WAITQUEUE(wait
, current
);
1425 /* Let's determine this according to the interleave only once */
1426 status_OK
= CMD(0x80);
1429 spin_lock(chip
->mutex
);
1430 ret
= get_chip(map
, chip
, adr
, FL_ERASING
);
1432 spin_unlock(chip
->mutex
);
1437 /* Clear the status register first */
1438 map_write(map
, CMD(0x50), adr
);
1441 map_write(map
, CMD(0x20), adr
);
1442 map_write(map
, CMD(0xD0), adr
);
1443 chip
->state
= FL_ERASING
;
1444 chip
->erase_suspended
= 0;
1446 spin_unlock(chip
->mutex
);
1447 INVALIDATE_CACHED_RANGE(map
, adr
, len
);
1448 msleep(chip
->erase_time
/ 2);
1449 spin_lock(chip
->mutex
);
1451 /* FIXME. Use a timer to check this, and return immediately. */
1452 /* Once the state machine's known to be working I'll do that */
1454 timeo
= jiffies
+ (HZ
*20);
1456 if (chip
->state
!= FL_ERASING
) {
1457 /* Someone's suspended the erase. Sleep */
1458 #if 1 // mask by Victor Yu. 05-14-2007
1459 set_current_state(TASK_UNINTERRUPTIBLE
);
1461 add_wait_queue(&chip
->wq
, &wait
);
1462 spin_unlock(chip
->mutex
);
1464 remove_wait_queue(&chip
->wq
, &wait
);
1465 spin_lock(chip
->mutex
);
1468 if (chip
->erase_suspended
) {
1469 /* This erase was suspended and resumed.
1470 Adjust the timeout */
1471 timeo
= jiffies
+ (HZ
*20); /* FIXME */
1472 chip
->erase_suspended
= 0;
1475 status
= map_read(map
, adr
);
1476 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1479 /* OK Still waiting */
1480 if (time_after(jiffies
, timeo
)) {
1481 map_write(map
, CMD(0x70), adr
);
1482 chip
->state
= FL_STATUS
;
1483 printk(KERN_ERR
"waiting for erase at %08lx to complete timed out. Xstatus = %lx, status = %lx.\n",
1484 adr
, status
.x
[0], map_read(map
, adr
).x
[0]);
1485 /* Clear status bits */
1486 map_write(map
, CMD(0x50), adr
);
1487 map_write(map
, CMD(0x70), adr
);
1489 spin_unlock(chip
->mutex
);
1493 /* Latency issues. Drop the lock, wait a while and retry */
1494 spin_unlock(chip
->mutex
);
1495 #if 1 // mask by Victor Yu. 05-14-2007
1496 set_current_state(TASK_UNINTERRUPTIBLE
);
1498 schedule_timeout(1);
1499 spin_lock(chip
->mutex
);
1505 /* We've broken this before. It doesn't hurt to be safe */
1506 map_write(map
, CMD(0x70), adr
);
1507 chip
->state
= FL_STATUS
;
1508 status
= map_read(map
, adr
);
1510 /* check for lock bit */
1511 if (map_word_bitsset(map
, status
, CMD(0x3a))) {
1512 unsigned char chipstatus
= status
.x
[0];
1513 if (!map_word_equal(map
, status
, CMD(chipstatus
))) {
1515 for (w
=0; w
<map_words(map
); w
++) {
1516 for (i
= 0; i
<cfi_interleave(cfi
); i
++) {
1517 chipstatus
|= status
.x
[w
] >> (cfi
->device_type
* 8);
1520 printk(KERN_WARNING
"Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1521 status
.x
[0], chipstatus
);
1523 /* Reset the error bits */
1524 map_write(map
, CMD(0x50), adr
);
1525 map_write(map
, CMD(0x70), adr
);
1527 if ((chipstatus
& 0x30) == 0x30) {
1528 printk(KERN_NOTICE
"Chip reports improper command sequence: status 0x%x\n", chipstatus
);
1530 } else if (chipstatus
& 0x02) {
1531 /* Protection bit set */
1533 } else if (chipstatus
& 0x8) {
1535 printk(KERN_WARNING
"Chip reports voltage low on erase: status 0x%x\n", chipstatus
);
1537 } else if (chipstatus
& 0x20) {
1539 printk(KERN_DEBUG
"Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr
, chipstatus
);
1540 timeo
= jiffies
+ HZ
;
1541 chip
->state
= FL_STATUS
;
1542 spin_unlock(chip
->mutex
);
1545 printk(KERN_DEBUG
"Chip erase failed at 0x%08lx: status 0x%x\n", adr
, chipstatus
);
1551 spin_unlock(chip
->mutex
);
1555 int cfi_intelext_erase_varsize(struct mtd_info
*mtd
, struct erase_info
*instr
)
1557 unsigned long ofs
, len
;
1563 ret
= cfi_intelext_varsize_frob(mtd
, do_erase_oneblock
, ofs
, len
, NULL
);
1567 instr
->state
= MTD_ERASE_DONE
;
1568 mtd_erase_callback(instr
);
1573 static void cfi_intelext_sync (struct mtd_info
*mtd
)
1575 struct map_info
*map
= mtd
->priv
;
1576 struct cfi_private
*cfi
= map
->fldrv_priv
;
1578 struct flchip
*chip
;
1581 for (i
=0; !ret
&& i
<cfi
->numchips
; i
++) {
1582 chip
= &cfi
->chips
[i
];
1584 spin_lock(chip
->mutex
);
1585 ret
= get_chip(map
, chip
, chip
->start
, FL_SYNCING
);
1588 chip
->oldstate
= chip
->state
;
1589 chip
->state
= FL_SYNCING
;
1590 /* No need to wake_up() on this state change -
1591 * as the whole point is that nobody can do anything
1592 * with the chip now anyway.
1595 spin_unlock(chip
->mutex
);
1598 /* Unlock the chips again */
1600 for (i
--; i
>=0; i
--) {
1601 chip
= &cfi
->chips
[i
];
1603 spin_lock(chip
->mutex
);
1605 if (chip
->state
== FL_SYNCING
) {
1606 chip
->state
= chip
->oldstate
;
1609 spin_unlock(chip
->mutex
);
#ifdef DEBUG_LOCK_BITS
/*
 * Debug helper for the varsize_frob walker: dump the hardware block-lock
 * status register of one erase block.  Leaves the chip in JEDEC query
 * mode (FL_JEDEC_QUERY), which callers must cope with.
 */
static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;

	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, cfi_read_query(map, adr+(2*ofs_factor)));
	chip->state = FL_JEDEC_QUERY;

	return 0;
}
#endif
1628 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
1629 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
1631 static int do_xxlock_oneblock(struct map_info
*map
, struct flchip
*chip
,
1632 unsigned long adr
, int len
, void *thunk
)
1634 struct cfi_private
*cfi
= map
->fldrv_priv
;
1635 map_word status
, status_OK
;
1636 unsigned long timeo
= jiffies
+ HZ
;
1641 /* Let's determine this according to the interleave only once */
1642 status_OK
= CMD(0x80);
1644 spin_lock(chip
->mutex
);
1645 ret
= get_chip(map
, chip
, adr
, FL_LOCKING
);
1647 spin_unlock(chip
->mutex
);
1652 map_write(map
, CMD(0x60), adr
);
1654 if (thunk
== DO_XXLOCK_ONEBLOCK_LOCK
) {
1655 map_write(map
, CMD(0x01), adr
);
1656 chip
->state
= FL_LOCKING
;
1657 } else if (thunk
== DO_XXLOCK_ONEBLOCK_UNLOCK
) {
1658 map_write(map
, CMD(0xD0), adr
);
1659 chip
->state
= FL_UNLOCKING
;
1663 spin_unlock(chip
->mutex
);
1664 schedule_timeout(HZ
);
1665 spin_lock(chip
->mutex
);
1667 /* FIXME. Use a timer to check this, and return immediately. */
1668 /* Once the state machine's known to be working I'll do that */
1670 timeo
= jiffies
+ (HZ
*20);
1673 status
= map_read(map
, adr
);
1674 if (map_word_andequal(map
, status
, status_OK
, status_OK
))
1677 /* OK Still waiting */
1678 if (time_after(jiffies
, timeo
)) {
1679 map_write(map
, CMD(0x70), adr
);
1680 chip
->state
= FL_STATUS
;
1681 printk(KERN_ERR
"waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n",
1682 status
.x
[0], map_read(map
, adr
).x
[0]);
1684 spin_unlock(chip
->mutex
);
1688 /* Latency issues. Drop the lock, wait a while and retry */
1689 spin_unlock(chip
->mutex
);
1691 spin_lock(chip
->mutex
);
1694 /* Done and happy. */
1695 chip
->state
= FL_STATUS
;
1696 put_chip(map
, chip
, adr
);
1697 spin_unlock(chip
->mutex
);
1701 static int cfi_intelext_lock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
1705 #ifdef DEBUG_LOCK_BITS
1706 printk(KERN_DEBUG
"%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1707 __FUNCTION__
, ofs
, len
);
1708 cfi_intelext_varsize_frob(mtd
, do_printlockstatus_oneblock
,
1712 ret
= cfi_intelext_varsize_frob(mtd
, do_xxlock_oneblock
,
1713 ofs
, len
, DO_XXLOCK_ONEBLOCK_LOCK
);
1715 #ifdef DEBUG_LOCK_BITS
1716 printk(KERN_DEBUG
"%s: lock status after, ret=%d\n",
1718 cfi_intelext_varsize_frob(mtd
, do_printlockstatus_oneblock
,
1725 static int cfi_intelext_unlock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
1729 #ifdef DEBUG_LOCK_BITS
1730 printk(KERN_DEBUG
"%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1731 __FUNCTION__
, ofs
, len
);
1732 cfi_intelext_varsize_frob(mtd
, do_printlockstatus_oneblock
,
1736 ret
= cfi_intelext_varsize_frob(mtd
, do_xxlock_oneblock
,
1737 ofs
, len
, DO_XXLOCK_ONEBLOCK_UNLOCK
);
1739 #ifdef DEBUG_LOCK_BITS
1740 printk(KERN_DEBUG
"%s: lock status after, ret=%d\n",
1742 cfi_intelext_varsize_frob(mtd
, do_printlockstatus_oneblock
,
1749 static int cfi_intelext_suspend(struct mtd_info
*mtd
)
1751 struct map_info
*map
= mtd
->priv
;
1752 struct cfi_private
*cfi
= map
->fldrv_priv
;
1754 struct flchip
*chip
;
1757 for (i
=0; !ret
&& i
<cfi
->numchips
; i
++) {
1758 chip
= &cfi
->chips
[i
];
1760 spin_lock(chip
->mutex
);
1762 switch (chip
->state
) {
1766 case FL_JEDEC_QUERY
:
1767 if (chip
->oldstate
== FL_READY
) {
1768 chip
->oldstate
= chip
->state
;
1769 chip
->state
= FL_PM_SUSPENDED
;
1770 /* No need to wake_up() on this state change -
1771 * as the whole point is that nobody can do anything
1772 * with the chip now anyway.
1778 case FL_PM_SUSPENDED
:
1781 spin_unlock(chip
->mutex
);
1784 /* Unlock the chips again */
1787 for (i
--; i
>=0; i
--) {
1788 chip
= &cfi
->chips
[i
];
1790 spin_lock(chip
->mutex
);
1792 if (chip
->state
== FL_PM_SUSPENDED
) {
1793 /* No need to force it into a known state here,
1794 because we're returning failure, and it didn't
1796 chip
->state
= chip
->oldstate
;
1799 spin_unlock(chip
->mutex
);
1806 static void cfi_intelext_resume(struct mtd_info
*mtd
)
1808 struct map_info
*map
= mtd
->priv
;
1809 struct cfi_private
*cfi
= map
->fldrv_priv
;
1811 struct flchip
*chip
;
1813 for (i
=0; i
<cfi
->numchips
; i
++) {
1815 chip
= &cfi
->chips
[i
];
1817 spin_lock(chip
->mutex
);
1819 /* Go to known state. Chip may have been power cycled */
1820 if (chip
->state
== FL_PM_SUSPENDED
) {
1821 map_write(map
, CMD(0xFF), cfi
->chips
[i
].start
);
1822 chip
->state
= FL_READY
;
1826 spin_unlock(chip
->mutex
);
1830 static void cfi_intelext_destroy(struct mtd_info
*mtd
)
1832 struct map_info
*map
= mtd
->priv
;
1833 struct cfi_private
*cfi
= map
->fldrv_priv
;
1834 kfree(cfi
->cmdset_priv
);
1836 kfree(cfi
->chips
[0].priv
);
1838 kfree(mtd
->eraseregions
);
/* Inter-module names under which this command-set driver registers. */
static char im_name_1[] = "cfi_cmdset_0001";
static char im_name_3[] = "cfi_cmdset_0003";
1844 int __init
cfi_intelext_init(void)
1846 inter_module_register(im_name_1
, THIS_MODULE
, &cfi_cmdset_0001
);
1847 inter_module_register(im_name_3
, THIS_MODULE
, &cfi_cmdset_0001
);
1851 static void __exit
cfi_intelext_exit(void)
1853 inter_module_unregister(im_name_1
);
1854 inter_module_unregister(im_name_3
);
1857 module_init(cfi_intelext_init
);
1858 module_exit(cfi_intelext_exit
);
1860 MODULE_LICENSE("GPL");
1861 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1862 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");