/*
 * MTD map driver for AMD compatible flash chips (non-CFI)
 *
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
 *
 * $Id: amd_flash.c,v 1.26 2004/11/20 12:49:04 dwmw2 Exp $
 *
 * Copyright (c) 2001 Axis Communications AB
 *
 * This file is under GPL.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
/* There's no limit. It exists only to avoid realloc. */
#define MAX_AMD_CHIPS 8

#define DEVICE_TYPE_X8	(8 / 8)
#define DEVICE_TYPE_X16	(16 / 8)
#define DEVICE_TYPE_X32	(32 / 8)

/* Addresses */
#define ADDR_MANUFACTURER		0x0000
#define ADDR_DEVICE_ID			0x0001
#define ADDR_SECTOR_LOCK		0x0002
#define ADDR_HANDSHAKE			0x0003
#define ADDR_UNLOCK_1			0x0555
#define ADDR_UNLOCK_2			0x02AA

/* Commands */
#define CMD_UNLOCK_DATA_1		0x00AA
#define CMD_UNLOCK_DATA_2		0x0055
#define CMD_MANUFACTURER_UNLOCK_DATA	0x0090
#define CMD_UNLOCK_BYPASS_MODE		0x0020
#define CMD_PROGRAM_UNLOCK_DATA		0x00A0
#define CMD_RESET_DATA			0x00F0
#define CMD_SECTOR_ERASE_UNLOCK_DATA	0x0080
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2	0x0030

#define CMD_UNLOCK_SECTOR		0x0060

/* DQ6 toggle bit, used by flash_is_busy() to detect a busy device.
 * NOTE(review): this define was lost in extraction; restored from usage. */
#define D6_MASK	0x40

/* Manufacturers */
#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_FUJITSU	0x0004
#define MANUFACTURER_ST		0x0020
#define MANUFACTURER_SST	0x00BF
#define MANUFACTURER_TOSHIBA	0x0098

/* AMD */
#define AM29F800BB	0x2258
#define AM29F800BT	0x22D6
#define AM29LV800BB	0x225B
#define AM29LV800BT	0x22DA
#define AM29LV160DT	0x22C4
#define AM29LV160DB	0x2249
#define AM29BDS323D	0x22D1
#define AM29BDS643D	0x227E

/* Atmel */
#define AT49xV16x	0x00C0
#define AT49xV16xT	0x00C2

/* Fujitsu */
#define MBM29LV160TE	0x22C4
#define MBM29LV160BE	0x2249
#define MBM29LV800BB	0x225B

/* ST - www.st.com */
#define M29W800T	0x00D7
#define M29W160DT	0x22C4
#define M29W160DB	0x2249

/* SST */
#define SST39LF800	0x2781
#define SST39LF160	0x2782

/* Toshiba */
#define TC58FVT160	0x00C2
#define TC58FVB160	0x0043
96 struct amd_flash_private
{
100 unsigned long chipshift
;
101 // const char *im_name;
102 struct flchip chips
[0];
105 struct amd_flash_info
{
110 const int numeraseregions
;
111 const struct mtd_erase_region_info regions
[4];
116 static int amd_flash_read(struct mtd_info
*, loff_t
, size_t, size_t *,
118 static int amd_flash_write(struct mtd_info
*, loff_t
, size_t, size_t *,
120 static int amd_flash_erase(struct mtd_info
*, struct erase_info
*);
121 static void amd_flash_sync(struct mtd_info
*);
122 static int amd_flash_suspend(struct mtd_info
*);
123 static void amd_flash_resume(struct mtd_info
*);
124 static void amd_flash_destroy(struct mtd_info
*);
125 static struct mtd_info
*amd_flash_probe(struct map_info
*map
);
128 static struct mtd_chip_driver amd_flash_chipdrv
= {
129 .probe
= amd_flash_probe
,
130 .destroy
= amd_flash_destroy
,
132 .module
= THIS_MODULE
137 static const char im_name
[] = "amd_flash";
141 static inline __u32
wide_read(struct map_info
*map
, __u32 addr
)
143 if (map
->buswidth
== 1) {
144 return map_read8(map
, addr
);
145 } else if (map
->buswidth
== 2) {
146 return map_read16(map
, addr
);
147 } else if (map
->buswidth
== 4) {
148 return map_read32(map
, addr
);
154 static inline void wide_write(struct map_info
*map
, __u32 val
, __u32 addr
)
156 if (map
->buswidth
== 1) {
157 map_write8(map
, val
, addr
);
158 } else if (map
->buswidth
== 2) {
159 map_write16(map
, val
, addr
);
160 } else if (map
->buswidth
== 4) {
161 map_write32(map
, val
, addr
);
165 static inline __u32
make_cmd(struct map_info
*map
, __u32 cmd
)
167 const struct amd_flash_private
*private = map
->fldrv_priv
;
168 if ((private->interleave
== 2) &&
169 (private->device_type
== DEVICE_TYPE_X16
)) {
176 static inline void send_unlock(struct map_info
*map
, unsigned long base
)
178 wide_write(map
, (CMD_UNLOCK_DATA_1
<< 16) | CMD_UNLOCK_DATA_1
,
179 base
+ (map
->buswidth
* ADDR_UNLOCK_1
));
180 wide_write(map
, (CMD_UNLOCK_DATA_2
<< 16) | CMD_UNLOCK_DATA_2
,
181 base
+ (map
->buswidth
* ADDR_UNLOCK_2
));
184 static inline void send_cmd(struct map_info
*map
, unsigned long base
, __u32 cmd
)
186 send_unlock(map
, base
);
187 wide_write(map
, make_cmd(map
, cmd
),
188 base
+ (map
->buswidth
* ADDR_UNLOCK_1
));
191 static inline void send_cmd_to_addr(struct map_info
*map
, unsigned long base
,
192 __u32 cmd
, unsigned long addr
)
194 send_unlock(map
, base
);
195 wide_write(map
, make_cmd(map
, cmd
), addr
);
198 static inline int flash_is_busy(struct map_info
*map
, unsigned long addr
,
202 if ((interleave
== 2) && (map
->buswidth
== 4)) {
205 read1
= wide_read(map
, addr
);
206 read2
= wide_read(map
, addr
);
208 return (((read1
>> 16) & D6_MASK
) !=
209 ((read2
>> 16) & D6_MASK
)) ||
210 (((read1
& 0xffff) & D6_MASK
) !=
211 ((read2
& 0xffff) & D6_MASK
));
214 return ((wide_read(map
, addr
) & D6_MASK
) !=
215 (wide_read(map
, addr
) & D6_MASK
));
218 static inline void unlock_sector(struct map_info
*map
, unsigned long sect_addr
,
221 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
223 (sect_addr
| (0x40 * map
->buswidth
)) :
224 (sect_addr
& ~(0x40 * map
->buswidth
)) ;
226 __u32 cmd
= make_cmd(map
, CMD_UNLOCK_SECTOR
);
228 wide_write(map
, make_cmd(map
, CMD_RESET_DATA
), 0);
229 wide_write(map
, cmd
, SLA
); /* 1st cycle: write cmd to any address */
230 wide_write(map
, cmd
, SLA
); /* 2nd cycle: write cmd to any address */
231 wide_write(map
, cmd
, SLA
); /* 3rd cycle: write cmd to SLA */
234 static inline int is_sector_locked(struct map_info
*map
,
235 unsigned long sect_addr
)
239 wide_write(map
, CMD_RESET_DATA
, 0);
240 send_cmd(map
, sect_addr
, CMD_MANUFACTURER_UNLOCK_DATA
);
242 /* status is 0x0000 for unlocked and 0x0001 for locked */
243 status
= wide_read(map
, sect_addr
+ (map
->buswidth
* ADDR_SECTOR_LOCK
));
244 wide_write(map
, CMD_RESET_DATA
, 0);
248 static int amd_flash_do_unlock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
,
251 struct map_info
*map
;
252 struct mtd_erase_region_info
*merip
;
253 int eraseoffset
, erasesize
, eraseblocks
;
260 /* Pass the whole chip through sector by sector and check for each
261 sector if the sector and the given interval overlap */
262 for(i
= 0; i
< mtd
->numeraseregions
; i
++) {
263 merip
= &mtd
->eraseregions
[i
];
265 eraseoffset
= merip
->offset
;
266 erasesize
= merip
->erasesize
;
267 eraseblocks
= merip
->numblocks
;
269 if (ofs
> eraseoffset
+ erasesize
)
272 while (eraseblocks
> 0) {
273 if (ofs
< eraseoffset
+ erasesize
&& ofs
+ len
> eraseoffset
) {
274 unlock_sector(map
, eraseoffset
, is_unlock
);
276 lock_status
= is_sector_locked(map
, eraseoffset
);
278 if (is_unlock
&& lock_status
) {
279 printk("Cannot unlock sector at address %x length %xx\n",
280 eraseoffset
, merip
->erasesize
);
282 } else if (!is_unlock
&& !lock_status
) {
283 printk("Cannot lock sector at address %x length %x\n",
284 eraseoffset
, merip
->erasesize
);
288 eraseoffset
+= erasesize
;
295 static int amd_flash_unlock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
297 return amd_flash_do_unlock(mtd
, ofs
, len
, 1);
300 static int amd_flash_lock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
302 return amd_flash_do_unlock(mtd
, ofs
, len
, 0);
307 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
308 * matching table entry (-1 if not found or alias for already found chip).
310 static int probe_new_chip(struct mtd_info
*mtd
, __u32 base
,
311 struct flchip
*chips
,
312 struct amd_flash_private
*private,
313 const struct amd_flash_info
*table
, int table_size
)
317 struct map_info
*map
= mtd
->priv
;
318 struct amd_flash_private temp
;
321 temp
.device_type
= DEVICE_TYPE_X16
; // Assume X16 (FIXME)
323 map
->fldrv_priv
= &temp
;
325 /* Enter autoselect mode. */
326 send_cmd(map
, base
, CMD_RESET_DATA
);
327 send_cmd(map
, base
, CMD_MANUFACTURER_UNLOCK_DATA
);
329 mfr_id
= wide_read(map
, base
+ (map
->buswidth
* ADDR_MANUFACTURER
));
330 dev_id
= wide_read(map
, base
+ (map
->buswidth
* ADDR_DEVICE_ID
));
332 if ((map
->buswidth
== 4) && ((mfr_id
>> 16) == (mfr_id
& 0xffff)) &&
333 ((dev_id
>> 16) == (dev_id
& 0xffff))) {
340 for (i
= 0; i
< table_size
; i
++) {
341 if ((mfr_id
== table
[i
].mfr_id
) &&
342 (dev_id
== table
[i
].dev_id
)) {
346 /* Is this an alias for an already found chip?
347 * In that case that chip should be in
348 * autoselect mode now.
350 for (j
= 0; j
< private->numchips
; j
++) {
355 wide_read(map
, chips
[j
].start
+
360 wide_read(map
, chips
[j
].start
+
363 if (temp
.interleave
== 2) {
364 mfr_id_other
&= 0xffff;
365 dev_id_other
&= 0xffff;
367 if ((mfr_id_other
== mfr_id
) &&
368 (dev_id_other
== dev_id
)) {
370 /* Exit autoselect mode. */
378 if (private->numchips
== MAX_AMD_CHIPS
) {
380 "%s: Too many flash chips "
381 "detected. Increase "
382 "MAX_AMD_CHIPS from %d.\n",
383 map
->name
, MAX_AMD_CHIPS
);
388 chips
[private->numchips
].start
= base
;
389 chips
[private->numchips
].state
= FL_READY
;
390 chips
[private->numchips
].mutex
=
391 &chips
[private->numchips
]._spinlock
;
395 printk("%s: Found %d x %ldMiB %s at 0x%x\n", map
->name
,
396 temp
.interleave
, (table
[i
].size
)/(1024*1024),
397 table
[i
].name
, base
);
399 mtd
->size
+= table
[i
].size
* temp
.interleave
;
400 mtd
->numeraseregions
+= table
[i
].numeraseregions
;
406 /* Exit autoselect mode. */
407 send_cmd(map
, base
, CMD_RESET_DATA
);
409 if (i
== table_size
) {
410 printk(KERN_DEBUG
"%s: unknown flash device at 0x%x, "
411 "mfr id 0x%x, dev id 0x%x\n", map
->name
,
412 base
, mfr_id
, dev_id
);
413 map
->fldrv_priv
= NULL
;
418 private->device_type
= temp
.device_type
;
419 private->interleave
= temp
.interleave
;
426 static struct mtd_info
*amd_flash_probe(struct map_info
*map
)
428 static const struct amd_flash_info table
[] = {
430 .mfr_id
= MANUFACTURER_AMD
,
431 .dev_id
= AM29LV160DT
,
432 .name
= "AMD AM29LV160DT",
434 .numeraseregions
= 4,
436 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
437 { .offset
= 0x1F0000, .erasesize
= 0x08000, .numblocks
= 1 },
438 { .offset
= 0x1F8000, .erasesize
= 0x02000, .numblocks
= 2 },
439 { .offset
= 0x1FC000, .erasesize
= 0x04000, .numblocks
= 1 }
442 .mfr_id
= MANUFACTURER_AMD
,
443 .dev_id
= AM29LV160DB
,
444 .name
= "AMD AM29LV160DB",
446 .numeraseregions
= 4,
448 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
449 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
450 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
451 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
454 .mfr_id
= MANUFACTURER_TOSHIBA
,
455 .dev_id
= TC58FVT160
,
456 .name
= "Toshiba TC58FVT160",
458 .numeraseregions
= 4,
460 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
461 { .offset
= 0x1F0000, .erasesize
= 0x08000, .numblocks
= 1 },
462 { .offset
= 0x1F8000, .erasesize
= 0x02000, .numblocks
= 2 },
463 { .offset
= 0x1FC000, .erasesize
= 0x04000, .numblocks
= 1 }
466 .mfr_id
= MANUFACTURER_FUJITSU
,
467 .dev_id
= MBM29LV160TE
,
468 .name
= "Fujitsu MBM29LV160TE",
470 .numeraseregions
= 4,
472 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
473 { .offset
= 0x1F0000, .erasesize
= 0x08000, .numblocks
= 1 },
474 { .offset
= 0x1F8000, .erasesize
= 0x02000, .numblocks
= 2 },
475 { .offset
= 0x1FC000, .erasesize
= 0x04000, .numblocks
= 1 }
478 .mfr_id
= MANUFACTURER_TOSHIBA
,
479 .dev_id
= TC58FVB160
,
480 .name
= "Toshiba TC58FVB160",
482 .numeraseregions
= 4,
484 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
485 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
486 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
487 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
490 .mfr_id
= MANUFACTURER_FUJITSU
,
491 .dev_id
= MBM29LV160BE
,
492 .name
= "Fujitsu MBM29LV160BE",
494 .numeraseregions
= 4,
496 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
497 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
498 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
499 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
502 .mfr_id
= MANUFACTURER_AMD
,
503 .dev_id
= AM29LV800BB
,
504 .name
= "AMD AM29LV800BB",
506 .numeraseregions
= 4,
508 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
509 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
510 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
511 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 15 }
514 .mfr_id
= MANUFACTURER_AMD
,
515 .dev_id
= AM29F800BB
,
516 .name
= "AMD AM29F800BB",
518 .numeraseregions
= 4,
520 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
521 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
522 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
523 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 15 }
526 .mfr_id
= MANUFACTURER_AMD
,
527 .dev_id
= AM29LV800BT
,
528 .name
= "AMD AM29LV800BT",
530 .numeraseregions
= 4,
532 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 15 },
533 { .offset
= 0x0F0000, .erasesize
= 0x08000, .numblocks
= 1 },
534 { .offset
= 0x0F8000, .erasesize
= 0x02000, .numblocks
= 2 },
535 { .offset
= 0x0FC000, .erasesize
= 0x04000, .numblocks
= 1 }
538 .mfr_id
= MANUFACTURER_AMD
,
539 .dev_id
= AM29F800BT
,
540 .name
= "AMD AM29F800BT",
542 .numeraseregions
= 4,
544 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 15 },
545 { .offset
= 0x0F0000, .erasesize
= 0x08000, .numblocks
= 1 },
546 { .offset
= 0x0F8000, .erasesize
= 0x02000, .numblocks
= 2 },
547 { .offset
= 0x0FC000, .erasesize
= 0x04000, .numblocks
= 1 }
550 .mfr_id
= MANUFACTURER_AMD
,
551 .dev_id
= AM29LV800BB
,
552 .name
= "AMD AM29LV800BB",
554 .numeraseregions
= 4,
556 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 15 },
557 { .offset
= 0x0F0000, .erasesize
= 0x08000, .numblocks
= 1 },
558 { .offset
= 0x0F8000, .erasesize
= 0x02000, .numblocks
= 2 },
559 { .offset
= 0x0FC000, .erasesize
= 0x04000, .numblocks
= 1 }
562 .mfr_id
= MANUFACTURER_FUJITSU
,
563 .dev_id
= MBM29LV800BB
,
564 .name
= "Fujitsu MBM29LV800BB",
566 .numeraseregions
= 4,
568 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
569 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
570 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
571 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 15 }
574 .mfr_id
= MANUFACTURER_ST
,
576 .name
= "ST M29W800T",
578 .numeraseregions
= 4,
580 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 15 },
581 { .offset
= 0x0F0000, .erasesize
= 0x08000, .numblocks
= 1 },
582 { .offset
= 0x0F8000, .erasesize
= 0x02000, .numblocks
= 2 },
583 { .offset
= 0x0FC000, .erasesize
= 0x04000, .numblocks
= 1 }
586 .mfr_id
= MANUFACTURER_ST
,
588 .name
= "ST M29W160DT",
590 .numeraseregions
= 4,
592 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
593 { .offset
= 0x1F0000, .erasesize
= 0x08000, .numblocks
= 1 },
594 { .offset
= 0x1F8000, .erasesize
= 0x02000, .numblocks
= 2 },
595 { .offset
= 0x1FC000, .erasesize
= 0x04000, .numblocks
= 1 }
598 .mfr_id
= MANUFACTURER_ST
,
600 .name
= "ST M29W160DB",
602 .numeraseregions
= 4,
604 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
605 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
606 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
607 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
610 .mfr_id
= MANUFACTURER_AMD
,
611 .dev_id
= AM29BDS323D
,
612 .name
= "AMD AM29BDS323D",
614 .numeraseregions
= 3,
616 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 48 },
617 { .offset
= 0x300000, .erasesize
= 0x10000, .numblocks
= 15 },
618 { .offset
= 0x3f0000, .erasesize
= 0x02000, .numblocks
= 8 },
621 .mfr_id
= MANUFACTURER_AMD
,
622 .dev_id
= AM29BDS643D
,
623 .name
= "AMD AM29BDS643D",
625 .numeraseregions
= 3,
627 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 96 },
628 { .offset
= 0x600000, .erasesize
= 0x10000, .numblocks
= 31 },
629 { .offset
= 0x7f0000, .erasesize
= 0x02000, .numblocks
= 8 },
632 .mfr_id
= MANUFACTURER_ATMEL
,
634 .name
= "Atmel AT49xV16x",
636 .numeraseregions
= 2,
638 { .offset
= 0x000000, .erasesize
= 0x02000, .numblocks
= 8 },
639 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
642 .mfr_id
= MANUFACTURER_ATMEL
,
643 .dev_id
= AT49xV16xT
,
644 .name
= "Atmel AT49xV16xT",
646 .numeraseregions
= 2,
648 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
649 { .offset
= 0x1F0000, .erasesize
= 0x02000, .numblocks
= 8 }
654 struct mtd_info
*mtd
;
655 struct flchip chips
[MAX_AMD_CHIPS
];
656 int table_pos
[MAX_AMD_CHIPS
];
657 struct amd_flash_private temp
;
658 struct amd_flash_private
*private;
665 mtd
= (struct mtd_info
*)kmalloc(sizeof(*mtd
), GFP_KERNEL
);
668 "%s: kmalloc failed for info structure\n", map
->name
);
671 memset(mtd
, 0, sizeof(*mtd
));
674 memset(&temp
, 0, sizeof(temp
));
676 printk("%s: Probing for AMD compatible flash...\n", map
->name
);
678 if ((table_pos
[0] = probe_new_chip(mtd
, 0, NULL
, &temp
, table
,
679 sizeof(table
)/sizeof(table
[0])))
682 "%s: Found no AMD compatible device at location zero\n",
690 chips
[0].state
= FL_READY
;
691 chips
[0].mutex
= &chips
[0]._spinlock
;
693 for (size
= mtd
->size
; size
> 1; size
>>= 1) {
696 switch (temp
.interleave
) {
705 /* Find out if there are any more chips in the map. */
706 for (base
= (1 << temp
.chipshift
);
708 base
+= (1 << temp
.chipshift
)) {
709 int numchips
= temp
.numchips
;
710 table_pos
[numchips
] = probe_new_chip(mtd
, base
, chips
,
711 &temp
, table
, sizeof(table
)/sizeof(table
[0]));
714 mtd
->eraseregions
= kmalloc(sizeof(struct mtd_erase_region_info
) *
715 mtd
->numeraseregions
, GFP_KERNEL
);
716 if (!mtd
->eraseregions
) {
717 printk(KERN_WARNING
"%s: Failed to allocate "
718 "memory for MTD erase region info\n", map
->name
);
720 map
->fldrv_priv
= NULL
;
726 for (i
= 0; i
< temp
.numchips
; i
++) {
731 for (j
= 0; j
< table
[table_pos
[i
]].numeraseregions
; j
++) {
732 mtd
->eraseregions
[reg_idx
].offset
= offset
+
733 (table
[table_pos
[i
]].regions
[j
].offset
*
735 mtd
->eraseregions
[reg_idx
].erasesize
=
736 table
[table_pos
[i
]].regions
[j
].erasesize
*
738 mtd
->eraseregions
[reg_idx
].numblocks
=
739 table
[table_pos
[i
]].regions
[j
].numblocks
;
741 mtd
->eraseregions
[reg_idx
].erasesize
) {
743 mtd
->eraseregions
[reg_idx
].erasesize
;
745 dev_size
+= mtd
->eraseregions
[reg_idx
].erasesize
*
746 mtd
->eraseregions
[reg_idx
].numblocks
;
751 mtd
->type
= MTD_NORFLASH
;
752 mtd
->flags
= MTD_CAP_NORFLASH
;
753 mtd
->name
= map
->name
;
754 mtd
->erase
= amd_flash_erase
;
755 mtd
->read
= amd_flash_read
;
756 mtd
->write
= amd_flash_write
;
757 mtd
->sync
= amd_flash_sync
;
758 mtd
->suspend
= amd_flash_suspend
;
759 mtd
->resume
= amd_flash_resume
;
760 mtd
->lock
= amd_flash_lock
;
761 mtd
->unlock
= amd_flash_unlock
;
763 private = kmalloc(sizeof(*private) + (sizeof(struct flchip
) *
764 temp
.numchips
), GFP_KERNEL
);
767 "%s: kmalloc failed for private structure\n", map
->name
);
769 map
->fldrv_priv
= NULL
;
772 memcpy(private, &temp
, sizeof(temp
));
773 memcpy(private->chips
, chips
,
774 sizeof(struct flchip
) * private->numchips
);
775 for (i
= 0; i
< private->numchips
; i
++) {
776 init_waitqueue_head(&private->chips
[i
].wq
);
777 spin_lock_init(&private->chips
[i
]._spinlock
);
780 map
->fldrv_priv
= private;
782 map
->fldrv
= &amd_flash_chipdrv
;
784 __module_get(THIS_MODULE
);
790 static inline int read_one_chip(struct map_info
*map
, struct flchip
*chip
,
791 loff_t adr
, size_t len
, u_char
*buf
)
793 DECLARE_WAITQUEUE(wait
, current
);
794 unsigned long timeo
= jiffies
+ HZ
;
797 spin_lock_bh(chip
->mutex
);
799 if (chip
->state
!= FL_READY
){
800 printk(KERN_INFO
"%s: waiting for chip to read, state = %d\n",
801 map
->name
, chip
->state
);
802 set_current_state(TASK_UNINTERRUPTIBLE
);
803 add_wait_queue(&chip
->wq
, &wait
);
805 spin_unlock_bh(chip
->mutex
);
808 remove_wait_queue(&chip
->wq
, &wait
);
810 if(signal_pending(current
)) {
814 timeo
= jiffies
+ HZ
;
821 chip
->state
= FL_READY
;
823 map_copy_from(map
, buf
, adr
, len
);
826 spin_unlock_bh(chip
->mutex
);
833 static int amd_flash_read(struct mtd_info
*mtd
, loff_t from
, size_t len
,
834 size_t *retlen
, u_char
*buf
)
836 struct map_info
*map
= mtd
->priv
;
837 struct amd_flash_private
*private = map
->fldrv_priv
;
842 if ((from
+ len
) > mtd
->size
) {
843 printk(KERN_WARNING
"%s: read request past end of device "
844 "(0x%lx)\n", map
->name
, (unsigned long)from
+ len
);
849 /* Offset within the first chip that the first read should start. */
850 chipnum
= (from
>> private->chipshift
);
851 ofs
= from
- (chipnum
<< private->chipshift
);
856 unsigned long this_len
;
858 if (chipnum
>= private->numchips
) {
862 if ((len
+ ofs
- 1) >> private->chipshift
) {
863 this_len
= (1 << private->chipshift
) - ofs
;
868 ret
= read_one_chip(map
, &private->chips
[chipnum
], ofs
,
887 static int write_one_word(struct map_info
*map
, struct flchip
*chip
,
888 unsigned long adr
, __u32 datum
)
890 unsigned long timeo
= jiffies
+ HZ
;
891 struct amd_flash_private
*private = map
->fldrv_priv
;
892 DECLARE_WAITQUEUE(wait
, current
);
897 spin_lock_bh(chip
->mutex
);
899 if (chip
->state
!= FL_READY
){
900 printk("%s: waiting for chip to write, state = %d\n",
901 map
->name
, chip
->state
);
902 set_current_state(TASK_UNINTERRUPTIBLE
);
903 add_wait_queue(&chip
->wq
, &wait
);
905 spin_unlock_bh(chip
->mutex
);
908 remove_wait_queue(&chip
->wq
, &wait
);
909 printk(KERN_INFO
"%s: woke up to write\n", map
->name
);
910 if(signal_pending(current
))
913 timeo
= jiffies
+ HZ
;
918 chip
->state
= FL_WRITING
;
922 send_cmd(map
, chip
->start
, CMD_PROGRAM_UNLOCK_DATA
);
923 wide_write(map
, datum
, adr
);
926 while (times_left
-- && flash_is_busy(map
, adr
, private->interleave
)) {
927 if (need_resched()) {
928 spin_unlock_bh(chip
->mutex
);
930 spin_lock_bh(chip
->mutex
);
935 printk(KERN_WARNING
"%s: write to 0x%lx timed out!\n",
940 if ((verify
= wide_read(map
, adr
)) != datum
) {
941 printk(KERN_WARNING
"%s: write to 0x%lx failed. "
942 "datum = %x, verify = %x\n",
943 map
->name
, adr
, datum
, verify
);
949 chip
->state
= FL_READY
;
951 spin_unlock_bh(chip
->mutex
);
958 static int amd_flash_write(struct mtd_info
*mtd
, loff_t to
, size_t len
,
959 size_t *retlen
, const u_char
*buf
)
961 struct map_info
*map
= mtd
->priv
;
962 struct amd_flash_private
*private = map
->fldrv_priv
;
966 unsigned long chipstart
;
973 chipnum
= to
>> private->chipshift
;
974 ofs
= to
- (chipnum
<< private->chipshift
);
975 chipstart
= private->chips
[chipnum
].start
;
977 /* If it's not bus-aligned, do the first byte write. */
978 if (ofs
& (map
->buswidth
- 1)) {
979 unsigned long bus_ofs
= ofs
& ~(map
->buswidth
- 1);
980 int i
= ofs
- bus_ofs
;
985 map_copy_from(map
, tmp_buf
,
986 bus_ofs
+ private->chips
[chipnum
].start
,
988 while (len
&& i
< map
->buswidth
)
989 tmp_buf
[i
++] = buf
[n
++], len
--;
991 if (map
->buswidth
== 2) {
992 datum
= *(__u16
*)tmp_buf
;
993 } else if (map
->buswidth
== 4) {
994 datum
= *(__u32
*)tmp_buf
;
996 return -EINVAL
; /* should never happen, but be safe */
999 ret
= write_one_word(map
, &private->chips
[chipnum
], bus_ofs
,
1009 if (ofs
>> private->chipshift
) {
1012 if (chipnum
== private->numchips
) {
1018 /* We are now aligned, write as much as possible. */
1019 while(len
>= map
->buswidth
) {
1022 if (map
->buswidth
== 1) {
1023 datum
= *(__u8
*)buf
;
1024 } else if (map
->buswidth
== 2) {
1025 datum
= *(__u16
*)buf
;
1026 } else if (map
->buswidth
== 4) {
1027 datum
= *(__u32
*)buf
;
1032 ret
= write_one_word(map
, &private->chips
[chipnum
], ofs
, datum
);
1038 ofs
+= map
->buswidth
;
1039 buf
+= map
->buswidth
;
1040 (*retlen
) += map
->buswidth
;
1041 len
-= map
->buswidth
;
1043 if (ofs
>> private->chipshift
) {
1046 if (chipnum
== private->numchips
) {
1049 chipstart
= private->chips
[chipnum
].start
;
1053 if (len
& (map
->buswidth
- 1)) {
1058 map_copy_from(map
, tmp_buf
,
1059 ofs
+ private->chips
[chipnum
].start
,
1062 tmp_buf
[i
++] = buf
[n
++];
1065 if (map
->buswidth
== 2) {
1066 datum
= *(__u16
*)tmp_buf
;
1067 } else if (map
->buswidth
== 4) {
1068 datum
= *(__u32
*)tmp_buf
;
1070 return -EINVAL
; /* should never happen, but be safe */
1073 ret
= write_one_word(map
, &private->chips
[chipnum
], ofs
, datum
);
1087 static inline int erase_one_block(struct map_info
*map
, struct flchip
*chip
,
1088 unsigned long adr
, u_long size
)
1090 unsigned long timeo
= jiffies
+ HZ
;
1091 struct amd_flash_private
*private = map
->fldrv_priv
;
1092 DECLARE_WAITQUEUE(wait
, current
);
1095 spin_lock_bh(chip
->mutex
);
1097 if (chip
->state
!= FL_READY
){
1098 set_current_state(TASK_UNINTERRUPTIBLE
);
1099 add_wait_queue(&chip
->wq
, &wait
);
1101 spin_unlock_bh(chip
->mutex
);
1104 remove_wait_queue(&chip
->wq
, &wait
);
1106 if (signal_pending(current
)) {
1110 timeo
= jiffies
+ HZ
;
1115 chip
->state
= FL_ERASING
;
1119 send_cmd(map
, chip
->start
, CMD_SECTOR_ERASE_UNLOCK_DATA
);
1120 send_cmd_to_addr(map
, chip
->start
, CMD_SECTOR_ERASE_UNLOCK_DATA_2
, adr
);
1122 timeo
= jiffies
+ (HZ
* 20);
1124 spin_unlock_bh(chip
->mutex
);
1126 spin_lock_bh(chip
->mutex
);
1128 while (flash_is_busy(map
, adr
, private->interleave
)) {
1130 if (chip
->state
!= FL_ERASING
) {
1131 /* Someone's suspended the erase. Sleep */
1132 set_current_state(TASK_UNINTERRUPTIBLE
);
1133 add_wait_queue(&chip
->wq
, &wait
);
1135 spin_unlock_bh(chip
->mutex
);
1136 printk(KERN_INFO
"%s: erase suspended. Sleeping\n",
1139 remove_wait_queue(&chip
->wq
, &wait
);
1141 if (signal_pending(current
)) {
1145 timeo
= jiffies
+ (HZ
*2); /* FIXME */
1146 spin_lock_bh(chip
->mutex
);
1150 /* OK Still waiting */
1151 if (time_after(jiffies
, timeo
)) {
1152 chip
->state
= FL_READY
;
1153 spin_unlock_bh(chip
->mutex
);
1154 printk(KERN_WARNING
"%s: waiting for erase to complete "
1155 "timed out.\n", map
->name
);
1161 /* Latency issues. Drop the lock, wait a while and retry */
1162 spin_unlock_bh(chip
->mutex
);
1169 spin_lock_bh(chip
->mutex
);
1172 /* Verify every single word */
1178 for (address
= adr
; address
< (adr
+ size
); address
++) {
1179 if ((verify
= map_read8(map
, address
)) != 0xFF) {
1185 chip
->state
= FL_READY
;
1186 spin_unlock_bh(chip
->mutex
);
1188 "%s: verify error at 0x%x, size %ld.\n",
1189 map
->name
, address
, size
);
1197 chip
->state
= FL_READY
;
1199 spin_unlock_bh(chip
->mutex
);
1206 static int amd_flash_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
1208 struct map_info
*map
= mtd
->priv
;
1209 struct amd_flash_private
*private = map
->fldrv_priv
;
1210 unsigned long adr
, len
;
1215 struct mtd_erase_region_info
*regions
= mtd
->eraseregions
;
1217 if (instr
->addr
> mtd
->size
) {
1221 if ((instr
->len
+ instr
->addr
) > mtd
->size
) {
1225 /* Check that both start and end of the requested erase are
1226 * aligned with the erasesize at the appropriate addresses.
1231 /* Skip all erase regions which are ended before the start of
1232 the requested erase. Actually, to save on the calculations,
1233 we skip to the first erase region which starts after the
1234 start of the requested erase, and then go back one.
1237 while ((i
< mtd
->numeraseregions
) &&
1238 (instr
->addr
>= regions
[i
].offset
)) {
1243 /* OK, now i is pointing at the erase region in which this
1244 * erase request starts. Check the start of the requested
1245 * erase range is aligned with the erase size which is in
1249 if (instr
->addr
& (regions
[i
].erasesize
-1)) {
1253 /* Remember the erase region we start on. */
1257 /* Next, check that the end of the requested erase is aligned
1258 * with the erase region at that address.
1261 while ((i
< mtd
->numeraseregions
) &&
1262 ((instr
->addr
+ instr
->len
) >= regions
[i
].offset
)) {
1266 /* As before, drop back one to point at the region in which
1267 * the address actually falls.
1272 if ((instr
->addr
+ instr
->len
) & (regions
[i
].erasesize
-1)) {
1276 chipnum
= instr
->addr
>> private->chipshift
;
1277 adr
= instr
->addr
- (chipnum
<< private->chipshift
);
1283 ret
= erase_one_block(map
, &private->chips
[chipnum
], adr
,
1284 regions
[i
].erasesize
);
1290 adr
+= regions
[i
].erasesize
;
1291 len
-= regions
[i
].erasesize
;
1293 if ((adr
% (1 << private->chipshift
)) ==
1294 ((regions
[i
].offset
+ (regions
[i
].erasesize
*
1295 regions
[i
].numblocks
))
1296 % (1 << private->chipshift
))) {
1300 if (adr
>> private->chipshift
) {
1303 if (chipnum
>= private->numchips
) {
1309 instr
->state
= MTD_ERASE_DONE
;
1310 mtd_erase_callback(instr
);
1317 static void amd_flash_sync(struct mtd_info
*mtd
)
1319 struct map_info
*map
= mtd
->priv
;
1320 struct amd_flash_private
*private = map
->fldrv_priv
;
1322 struct flchip
*chip
;
1324 DECLARE_WAITQUEUE(wait
, current
);
1326 for (i
= 0; !ret
&& (i
< private->numchips
); i
++) {
1327 chip
= &private->chips
[i
];
1330 spin_lock_bh(chip
->mutex
);
1332 switch(chip
->state
) {
1336 case FL_JEDEC_QUERY
:
1337 chip
->oldstate
= chip
->state
;
1338 chip
->state
= FL_SYNCING
;
1339 /* No need to wake_up() on this state change -
1340 * as the whole point is that nobody can do anything
1341 * with the chip now anyway.
1344 spin_unlock_bh(chip
->mutex
);
1348 /* Not an idle state */
1349 add_wait_queue(&chip
->wq
, &wait
);
1351 spin_unlock_bh(chip
->mutex
);
1355 remove_wait_queue(&chip
->wq
, &wait
);
1361 /* Unlock the chips again */
1362 for (i
--; i
>= 0; i
--) {
1363 chip
= &private->chips
[i
];
1365 spin_lock_bh(chip
->mutex
);
1367 if (chip
->state
== FL_SYNCING
) {
1368 chip
->state
= chip
->oldstate
;
1371 spin_unlock_bh(chip
->mutex
);
1377 static int amd_flash_suspend(struct mtd_info
*mtd
)
1379 printk("amd_flash_suspend(): not implemented!\n");
/* mtd->resume entry point — not implemented. */
static void amd_flash_resume(struct mtd_info *mtd)
{
	printk("amd_flash_resume(): not implemented!\n");
}
1392 static void amd_flash_destroy(struct mtd_info
*mtd
)
1394 struct map_info
*map
= mtd
->priv
;
1395 struct amd_flash_private
*private = map
->fldrv_priv
;
1399 int __init
amd_flash_init(void)
1401 register_mtd_chip_driver(&amd_flash_chipdrv
);
1405 void __exit
amd_flash_exit(void)
1407 unregister_mtd_chip_driver(&amd_flash_chipdrv
);
1410 module_init(amd_flash_init
);
1411 module_exit(amd_flash_exit
);
1413 MODULE_LICENSE("GPL");
1414 MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
1415 MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");