/*
 * MTD map driver for AMD compatible flash chips (non-CFI)
 *
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
 *
 * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
 *
 * Copyright (c) 2001 Axis Communications AB
 *
 * This file is under GPL.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
/* There's no limit. It exists only to avoid realloc. */
#define MAX_AMD_CHIPS 8

#define DEVICE_TYPE_X8	(8 / 8)
#define DEVICE_TYPE_X16	(16 / 8)
#define DEVICE_TYPE_X32	(32 / 8)
/* Addresses */
#define ADDR_MANUFACTURER		0x0000
#define ADDR_DEVICE_ID			0x0001
#define ADDR_SECTOR_LOCK		0x0002
#define ADDR_HANDSHAKE			0x0003
#define ADDR_UNLOCK_1			0x0555
#define ADDR_UNLOCK_2			0x02AA
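
/*
 * ADDR_UNLOCK_1/2 are the magic word offsets of the JEDEC unlock cycle:
 * every command (except reset) is preceded by writing CMD_UNLOCK_DATA_1 to
 * ADDR_UNLOCK_1 and CMD_UNLOCK_DATA_2 to ADDR_UNLOCK_2. They are word
 * addresses, so send_unlock() below scales them by map->buswidth.
 */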

/* Commands */
#define CMD_UNLOCK_DATA_1		0x00AA
#define CMD_UNLOCK_DATA_2		0x0055
#define CMD_MANUFACTURER_UNLOCK_DATA	0x0090
#define CMD_UNLOCK_BYPASS_MODE		0x0020
#define CMD_PROGRAM_UNLOCK_DATA		0x00A0
#define CMD_RESET_DATA			0x00F0
#define CMD_SECTOR_ERASE_UNLOCK_DATA	0x0080
#define CMD_SECTOR_ERASE_UNLOCK_DATA_2	0x0030

#define CMD_UNLOCK_SECTOR		0x0060

/* DQ6 status bit, used by flash_is_busy() below. */
#define D6_MASK	0x40

/* Manufacturers */
#define MANUFACTURER_AMD	0x0001
#define MANUFACTURER_ATMEL	0x001F
#define MANUFACTURER_FUJITSU	0x0004
#define MANUFACTURER_ST		0x0020
#define MANUFACTURER_SST	0x00BF
#define MANUFACTURER_TOSHIBA	0x0098

/* AMD */
#define AM29F800BB	0x2258
#define AM29F800BT	0x22D6
#define AM29LV800BB	0x225B
#define AM29LV800BT	0x22DA
#define AM29LV160DT	0x22C4
#define AM29LV160DB	0x2249
#define AM29BDS323D	0x22D1

/* Atmel */
#define AT49xV16x	0x00C0
#define AT49xV16xT	0x00C2

/* Fujitsu */
#define MBM29LV160TE	0x22C4
#define MBM29LV160BE	0x2249
#define MBM29LV800BB	0x225B

/* ST */
#define M29W800T	0x00D7
#define M29W160DT	0x22C4
#define M29W160DB	0x2249

/* SST */
#define SST39LF800	0x2781
#define SST39LF160	0x2782

/* Toshiba */
#define TC58FVT160	0x00C2
#define TC58FVB160	0x0043

struct amd_flash_private {
	int device_type;
	int interleave;
	int numchips;
	unsigned long chipshift;
	struct flchip chips[0];
};

struct amd_flash_info {
	const __u16 mfr_id;
	const __u16 dev_id;
	const char *name;
	const u_long size;
	const int numeraseregions;
	const struct mtd_erase_region_info regions[4];
};
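
/*
 * Note: amd_flash_private ends in a zero-length array; amd_flash_probe()
 * allocates sizeof(*private) + sizeof(struct flchip) * numchips, so chips[]
 * holds one flchip per detected chip.
 */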

static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *,
			  u_char *);
static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *,
			   const u_char *);
static int amd_flash_erase(struct mtd_info *, struct erase_info *);
static void amd_flash_sync(struct mtd_info *);
static int amd_flash_suspend(struct mtd_info *);
static void amd_flash_resume(struct mtd_info *);
static void amd_flash_destroy(struct mtd_info *);
static struct mtd_info *amd_flash_probe(struct map_info *map);

static struct mtd_chip_driver amd_flash_chipdrv = {
	.probe = amd_flash_probe,
	.destroy = amd_flash_destroy,
	.name = "amd_flash",
	.module = THIS_MODULE
};

static inline __u32 wide_read(struct map_info *map, __u32 addr)
{
	if (map->buswidth == 1) {
		return map_read8(map, addr);
	} else if (map->buswidth == 2) {
		return map_read16(map, addr);
	} else if (map->buswidth == 4) {
		return map_read32(map, addr);
	}

	return 0;
}

static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
{
	if (map->buswidth == 1) {
		map_write8(map, val, addr);
	} else if (map->buswidth == 2) {
		map_write16(map, val, addr);
	} else if (map->buswidth == 4) {
		map_write32(map, val, addr);
	}
}

static inline __u32 make_cmd(struct map_info *map, __u32 cmd)
{
	const struct amd_flash_private *private = map->fldrv_priv;
	if ((private->interleave == 2) &&
	    (private->device_type == DEVICE_TYPE_X16)) {
		cmd |= (cmd << 16);
	}

	return cmd;
}
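
/*
 * Example: with two x16 chips interleaved on a 32-bit bus, make_cmd() widens
 * CMD_PROGRAM_UNLOCK_DATA (0x00A0) to 0x00A000A0 so both chips see the
 * command in a single bus cycle.
 */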

static inline void send_unlock(struct map_info *map, unsigned long base)
{
	wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1,
		   base + (map->buswidth * ADDR_UNLOCK_1));
	wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2,
		   base + (map->buswidth * ADDR_UNLOCK_2));
}

static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd)
{
	send_unlock(map, base);
	wide_write(map, make_cmd(map, cmd),
		   base + (map->buswidth * ADDR_UNLOCK_1));
}
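
/*
 * A full command is thus three bus writes, e.g. for program:
 *   0xAA -> base + buswidth * ADDR_UNLOCK_1,
 *   0x55 -> base + buswidth * ADDR_UNLOCK_2,
 *   0xA0 -> base + buswidth * ADDR_UNLOCK_1,
 * followed by the datum written to its destination address.
 */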

static inline void send_cmd_to_addr(struct map_info *map, unsigned long base,
				    __u32 cmd, unsigned long addr)
{
	send_unlock(map, base);
	wide_write(map, make_cmd(map, cmd), addr);
}
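
/*
 * D6_MASK selects DQ6, the AMD-style "toggle bit": while an embedded program
 * or erase algorithm runs, the chip inverts DQ6 on every read, so two
 * consecutive reads that differ mean busy and two that match mean done. With
 * two interleaved x16 chips on a 32-bit bus each half is checked separately.
 */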
static inline int flash_is_busy(struct map_info *map, unsigned long addr,
				int interleave)
{
	if ((interleave == 2) && (map->buswidth == 4)) {
		__u32 read1, read2;

		read1 = wide_read(map, addr);
		read2 = wide_read(map, addr);

		return (((read1 >> 16) & D6_MASK) !=
			((read2 >> 16) & D6_MASK)) ||
		       (((read1 & 0xffff) & D6_MASK) !=
			((read2 & 0xffff) & D6_MASK));
	}

	return ((wide_read(map, addr) & D6_MASK) !=
		(wide_read(map, addr) & D6_MASK));
}

static inline void unlock_sector(struct map_info *map, unsigned long sect_addr,
				 int unlock)
{
	/* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
	int SLA = unlock ?
		(sect_addr |  (0x40 * map->buswidth)) :
		(sect_addr & ~(0x40 * map->buswidth));

	__u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR);

	wide_write(map, make_cmd(map, CMD_RESET_DATA), 0);
	wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */
	wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */
	wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */
}

static inline int is_sector_locked(struct map_info *map,
				   unsigned long sect_addr)
{
	int status;

	wide_write(map, CMD_RESET_DATA, 0);
	send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA);

	/* status is 0x0000 for unlocked and 0x0001 for locked */
	status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK));
	wide_write(map, CMD_RESET_DATA, 0);
	return status;
}

static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
			       int is_unlock)
{
	struct map_info *map;
	struct mtd_erase_region_info *merip;
	int eraseoffset, erasesize, eraseblocks;
	int i;
	int retval = 0;
	int lock_status;

	map = mtd->priv;

	/* Pass the whole chip through sector by sector and check for each
	   sector if the sector and the given interval overlap */
	for(i = 0; i < mtd->numeraseregions; i++) {
		merip = &mtd->eraseregions[i];

		eraseoffset = merip->offset;
		erasesize = merip->erasesize;
		eraseblocks = merip->numblocks;

		if (ofs > eraseoffset + erasesize)
			continue;

		while (eraseblocks > 0) {
			if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) {
				unlock_sector(map, eraseoffset, is_unlock);

				lock_status = is_sector_locked(map, eraseoffset);

				if (is_unlock && lock_status) {
					printk("Cannot unlock sector at address %x length %x\n",
					       eraseoffset, merip->erasesize);
					retval = -1;
				} else if (!is_unlock && !lock_status) {
					printk("Cannot lock sector at address %x length %x\n",
					       eraseoffset, merip->erasesize);
					retval = -1;
				}
			}
			eraseoffset += erasesize;
			eraseblocks--;
		}
	}
	return retval;
}
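
/* Thin wrappers backing the mtd->unlock and mtd->lock operations that
 * amd_flash_probe() installs; they differ only in the is_unlock flag.
 */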
static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return amd_flash_do_unlock(mtd, ofs, len, 1);
}

static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	return amd_flash_do_unlock(mtd, ofs, len, 0);
}

/*
 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
 * matching table entry (-1 if not found or alias for already found chip).
 */
static int probe_new_chip(struct mtd_info *mtd, __u32 base,
			  struct flchip *chips,
			  struct amd_flash_private *private,
			  const struct amd_flash_info *table, int table_size)
{
	__u32 mfr_id;
	__u32 dev_id;
	struct map_info *map = mtd->priv;
	struct amd_flash_private temp;
	int i;

	temp.device_type = DEVICE_TYPE_X16;	// Assume X16 (FIXME)
	temp.interleave = 2;
	map->fldrv_priv = &temp;

	/* Enter autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);
	send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA);

	mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER));
	dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID));

	/* A 32-bit bus returning the same ID in both halves means two
	   interleaved x16 chips; otherwise assume a single chip. */
	if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) &&
	    ((dev_id >> 16) == (dev_id & 0xffff))) {
		mfr_id &= 0xffff;
		dev_id &= 0xffff;
	} else {
		temp.interleave = 1;
	}

	for (i = 0; i < table_size; i++) {
		if ((mfr_id == table[i].mfr_id) &&
		    (dev_id == table[i].dev_id)) {
			if (chips) {
				int j;

				/* Is this an alias for an already found chip?
				 * In that case that chip should be in
				 * autoselect mode now.
				 */
				for (j = 0; j < private->numchips; j++) {
					__u32 mfr_id_other;
					__u32 dev_id_other;

					mfr_id_other =
						wide_read(map, chips[j].start +
							(map->buswidth *
							 ADDR_MANUFACTURER));
					dev_id_other =
						wide_read(map, chips[j].start +
							(map->buswidth *
							 ADDR_DEVICE_ID));
					if (temp.interleave == 2) {
						mfr_id_other &= 0xffff;
						dev_id_other &= 0xffff;
					}
					if ((mfr_id_other == mfr_id) &&
					    (dev_id_other == dev_id)) {

						/* Exit autoselect mode. */
						send_cmd(map, base,
							 CMD_RESET_DATA);

						return -1;
					}
				}

				if (private->numchips == MAX_AMD_CHIPS) {
					printk(KERN_WARNING
					       "%s: Too many flash chips "
					       "detected. Increase "
					       "MAX_AMD_CHIPS from %d.\n",
					       map->name, MAX_AMD_CHIPS);

					return -1;
				}

				chips[private->numchips].start = base;
				chips[private->numchips].state = FL_READY;
				chips[private->numchips].mutex =
					&chips[private->numchips]._spinlock;
				private->numchips++;
			}

			printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name,
			       temp.interleave, (table[i].size)/(1024*1024),
			       table[i].name, base);

			mtd->size += table[i].size * temp.interleave;
			mtd->numeraseregions += table[i].numeraseregions;

			break;
		}
	}

	/* Exit autoselect mode. */
	send_cmd(map, base, CMD_RESET_DATA);

	if (i == table_size) {
		printk(KERN_DEBUG "%s: unknown flash device at 0x%x, "
		       "mfr id 0x%x, dev id 0x%x\n", map->name,
		       base, mfr_id, dev_id);
		map->fldrv_priv = NULL;

		return -1;
	}

	private->device_type = temp.device_type;
	private->interleave = temp.interleave;

	return i;
}
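
/*
 * The table below describes each supported chip: JEDEC IDs, per-chip size
 * and erase-block geometry (the regions of each entry sum to .size). The
 * probe code scales sizes and region offsets by the detected interleave.
 */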
static struct mtd_info *amd_flash_probe(struct map_info *map)
{
	static const struct amd_flash_info table[] = {
	{
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV160DT,
		.name = "AMD AM29LV160DT",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV160DB,
		.name = "AMD AM29LV160DB",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_TOSHIBA,
		.dev_id = TC58FVT160,
		.name = "Toshiba TC58FVT160",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
		}
	}, {
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV160TE,
		.name = "Fujitsu MBM29LV160TE",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
		}
	}, {
		.mfr_id = MANUFACTURER_TOSHIBA,
		.dev_id = TC58FVB160,
		.name = "Toshiba TC58FVB160",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV160BE,
		.name = "Fujitsu MBM29LV160BE",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BB,
		.name = "AMD AM29LV800BB",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29F800BB,
		.name = "AMD AM29F800BB",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BT,
		.name = "AMD AM29LV800BT",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29F800BT,
		.name = "AMD AM29F800BT",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29LV800BB,
		.name = "AMD AM29LV800BB",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
		}
	}, {
		.mfr_id = MANUFACTURER_FUJITSU,
		.dev_id = MBM29LV800BB,
		.name = "Fujitsu MBM29LV800BB",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
		}
	}, {
		.mfr_id = MANUFACTURER_ST,
		.dev_id = M29W800T,
		.name = "ST M29W800T",
		.size = 0x00100000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x0F0000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x0F8000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x0FC000, .erasesize = 0x04000, .numblocks =  1 }
		}
	}, {
		.mfr_id = MANUFACTURER_ST,
		.dev_id = M29W160DT,
		.name = "ST M29W160DT",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x1F8000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x1FC000, .erasesize = 0x04000, .numblocks =  1 }
		}
	}, {
		.mfr_id = MANUFACTURER_ST,
		.dev_id = M29W160DB,
		.name = "ST M29W160DB",
		.size = 0x00200000,
		.numeraseregions = 4,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x04000, .numblocks =  1 },
			{ .offset = 0x004000, .erasesize = 0x02000, .numblocks =  2 },
			{ .offset = 0x008000, .erasesize = 0x08000, .numblocks =  1 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_AMD,
		.dev_id = AM29BDS323D,
		.name = "AMD AM29BDS323D",
		.size = 0x00400000,
		.numeraseregions = 3,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
			{ .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
			{ .offset = 0x3f0000, .erasesize = 0x02000, .numblocks =  8 },
		}
	}, {
		.mfr_id = MANUFACTURER_ATMEL,
		.dev_id = AT49xV16x,
		.name = "Atmel AT49xV16x",
		.size = 0x00200000,
		.numeraseregions = 2,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x02000, .numblocks =  8 },
			{ .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
		}
	}, {
		.mfr_id = MANUFACTURER_ATMEL,
		.dev_id = AT49xV16xT,
		.name = "Atmel AT49xV16xT",
		.size = 0x00200000,
		.numeraseregions = 2,
		.regions = {
			{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
			{ .offset = 0x1F0000, .erasesize = 0x02000, .numblocks =  8 }
		}
	}
	};

	struct mtd_info *mtd;
	struct flchip chips[MAX_AMD_CHIPS];
	int table_pos[MAX_AMD_CHIPS];
	struct amd_flash_private temp;
	struct amd_flash_private *private;
	u_long size;
	unsigned long base;
	int i;
	int reg_idx;
	int offset;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING
		       "%s: kmalloc failed for info structure\n", map->name);
		return NULL;
	}
	mtd->priv = map;

	memset(&temp, 0, sizeof(temp));

	printk("%s: Probing for AMD compatible flash...\n", map->name);

	if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
					   ARRAY_SIZE(table))) == -1) {
		printk(KERN_WARNING
		       "%s: Found no AMD compatible device at location zero\n",
		       map->name);
		kfree(mtd);

		return NULL;
	}

	chips[0].start = 0;
	chips[0].state = FL_READY;
	chips[0].mutex = &chips[0]._spinlock;
	temp.numchips = 1;
	for (size = mtd->size; size > 1; size >>= 1) {
		temp.chipshift++;
	}
	switch (temp.interleave) {
	case 2:
		temp.chipshift += 1;
		break;
	case 4:
		temp.chipshift += 2;
		break;
	}

	/* Find out if there are any more chips in the map. */
	for (base = (1 << temp.chipshift);
	     base < map->size;
	     base += (1 << temp.chipshift)) {
		int numchips = temp.numchips;
		table_pos[numchips] = probe_new_chip(mtd, base, chips,
			&temp, table, ARRAY_SIZE(table));
	}

	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
				    mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "%s: Failed to allocate "
		       "memory for MTD erase region info\n", map->name);
		kfree(mtd);
		map->fldrv_priv = NULL;
		return NULL;
	}

	reg_idx = 0;
	offset = 0;
	for (i = 0; i < temp.numchips; i++) {
		int dev_size;
		int j;

		dev_size = 0;
		for (j = 0; j < table[table_pos[i]].numeraseregions; j++) {
			mtd->eraseregions[reg_idx].offset = offset +
				(table[table_pos[i]].regions[j].offset *
				 temp.interleave);
			mtd->eraseregions[reg_idx].erasesize =
				table[table_pos[i]].regions[j].erasesize *
				temp.interleave;
			mtd->eraseregions[reg_idx].numblocks =
				table[table_pos[i]].regions[j].numblocks;
			if (mtd->erasesize <
			    mtd->eraseregions[reg_idx].erasesize) {
				mtd->erasesize =
					mtd->eraseregions[reg_idx].erasesize;
			}
			dev_size += mtd->eraseregions[reg_idx].erasesize *
				    mtd->eraseregions[reg_idx].numblocks;
			reg_idx++;
		}
		offset += dev_size;
	}
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->erase = amd_flash_erase;
	mtd->read = amd_flash_read;
	mtd->write = amd_flash_write;
	mtd->sync = amd_flash_sync;
	mtd->suspend = amd_flash_suspend;
	mtd->resume = amd_flash_resume;
	mtd->lock = amd_flash_lock;
	mtd->unlock = amd_flash_unlock;

	private = kmalloc(sizeof(*private) + (sizeof(struct flchip) *
					      temp.numchips), GFP_KERNEL);
	if (!private) {
		printk(KERN_WARNING
		       "%s: kmalloc failed for private structure\n", map->name);
		kfree(mtd->eraseregions);
		kfree(mtd);
		map->fldrv_priv = NULL;
		return NULL;
	}
	memcpy(private, &temp, sizeof(temp));
	memcpy(private->chips, chips,
	       sizeof(struct flchip) * private->numchips);
	for (i = 0; i < private->numchips; i++) {
		init_waitqueue_head(&private->chips[i].wq);
		spin_lock_init(&private->chips[i]._spinlock);
	}

	map->fldrv_priv = private;

	map->fldrv = &amd_flash_chipdrv;

	__module_get(THIS_MODULE);

	return mtd;
}

static inline int read_one_chip(struct map_info *map, struct flchip *chip,
				loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		printk(KERN_INFO "%s: waiting for chip to read, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if(signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	map_copy_from(map, buf, adr, len);

	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}
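
/*
 * A linear offset into the MTD maps to (chip, offset) by shifting: e.g. with
 * chipshift = 21 (one 2MiB chip spans 1 << 21 bytes), from = 0x210000 gives
 * chipnum = from >> 21 = 1 and ofs = from - (1 << 21) = 0x10000.
 */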
static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len,
			  size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if ((from + len) > mtd->size) {
		printk(KERN_WARNING "%s: read request past end of device "
		       "(0x%lx)\n", map->name, (unsigned long)from + len);

		return -EINVAL;
	}

	/* Offset within the first chip that the first read should start. */
	chipnum = (from >> private->chipshift);
	ofs = from - (chipnum << private->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long this_len;

		if (chipnum >= private->numchips) {
			break;
		}

		if ((len + ofs - 1) >> private->chipshift) {
			this_len = (1 << private->chipshift) - ofs;
		} else {
			this_len = len;
		}

		ret = read_one_chip(map, &private->chips[chipnum], ofs,
				    this_len, buf);
		if (ret) {
			break;
		}

		*retlen += this_len;
		len -= this_len;
		buf += this_len;

		ofs = 0;
		chipnum++;
	}

	return ret;
}

static int write_one_word(struct map_info *map, struct flchip *chip,
			  unsigned long adr, __u32 datum)
{
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;
	int times_left;

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		printk("%s: waiting for chip to write, state = %d\n",
		       map->name, chip->state);
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
		printk(KERN_INFO "%s: woke up to write\n", map->name);
		if(signal_pending(current))
			return -EINTR;

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_WRITING;

	adr += chip->start;

	send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA);
	wide_write(map, datum, adr);

	times_left = 500000;
	while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
		if (need_resched()) {
			spin_unlock_bh(chip->mutex);
			schedule();
			spin_lock_bh(chip->mutex);
		}
	}

	if (!times_left) {
		printk(KERN_WARNING "%s: write to 0x%lx timed out!\n",
		       map->name, adr);
		ret = -EIO;
	} else {
		__u32 verify;
		if ((verify = wide_read(map, adr)) != datum) {
			printk(KERN_WARNING "%s: write to 0x%lx failed. "
			       "datum = %x, verify = %x\n",
			       map->name, adr, datum, verify);
			ret = -EIO;
		}
	}

	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return ret;
}
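
/*
 * amd_flash_write() below handles a misaligned head and tail by reading the
 * surrounding bus word back from flash, merging in the caller's bytes and
 * programming the merged word; the aligned middle is written one bus word
 * at a time.
 */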
static int amd_flash_write(struct mtd_info *mtd, loff_t to, size_t len,
			   size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;
	unsigned long chipstart;

	*retlen = 0;
	if (!len) {
		return 0;
	}

	chipnum = to >> private->chipshift;
	ofs = to - (chipnum << private->chipshift);
	chipstart = private->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write. */
	if (ofs & (map->buswidth - 1)) {
		unsigned long bus_ofs = ofs & ~(map->buswidth - 1);
		int i = ofs - bus_ofs;
		int n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map_copy_from(map, tmp_buf,
			      bus_ofs + private->chips[chipnum].start,
			      map->buswidth);
		while (len && i < map->buswidth)
			tmp_buf[i++] = buf[n++], len--;

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = write_one_word(map, &private->chips[chipnum], bus_ofs,
				     datum);
		if (ret) {
			return ret;
		}

		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> private->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == private->numchips) {
				return 0;
			}
		}
	}

	/* We are now aligned, write as much as possible. */
	while(len >= map->buswidth) {
		__u32 datum;

		if (map->buswidth == 1) {
			datum = *(__u8*)buf;
		} else if (map->buswidth == 2) {
			datum = *(__u16*)buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)buf;
		} else {
			return -EINVAL;
		}

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		if (ret) {
			return ret;
		}

		ofs += map->buswidth;
		buf += map->buswidth;
		(*retlen) += map->buswidth;
		len -= map->buswidth;

		if (ofs >> private->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == private->numchips) {
				return 0;
			}
			chipstart = private->chips[chipnum].start;
		}
	}

	if (len & (map->buswidth - 1)) {
		int i = 0, n = 0;
		u_char tmp_buf[4];
		__u32 datum;

		map_copy_from(map, tmp_buf,
			      ofs + private->chips[chipnum].start,
			      map->buswidth);
		while (len--) {
			tmp_buf[i++] = buf[n++];
		}

		if (map->buswidth == 2) {
			datum = *(__u16*)tmp_buf;
		} else if (map->buswidth == 4) {
			datum = *(__u32*)tmp_buf;
		} else {
			return -EINVAL;  /* should never happen, but be safe */
		}

		ret = write_one_word(map, &private->chips[chipnum], ofs, datum);

		if (ret) {
			return ret;
		}

		(*retlen) += n;
	}

	return 0;
}
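
/*
 * Sector erase is a two-part command sequence: an unlocked 0x80
 * (CMD_SECTOR_ERASE_UNLOCK_DATA) to the chip base, then an unlocked 0x30
 * (CMD_SECTOR_ERASE_UNLOCK_DATA_2) written to the address of the sector to
 * erase. Completion is detected with the DQ6 toggle bit, after which every
 * byte of the block is verified to read back 0xFF.
 */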
static inline int erase_one_block(struct map_info *map, struct flchip *chip,
				  unsigned long adr, u_long size)
{
	unsigned long timeo = jiffies + HZ;
	struct amd_flash_private *private = map->fldrv_priv;
	DECLARE_WAITQUEUE(wait, current);

retry:
	spin_lock_bh(chip->mutex);

	if (chip->state != FL_READY){
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		if (signal_pending(current)) {
			return -EINTR;
		}

		timeo = jiffies + HZ;

		goto retry;
	}

	chip->state = FL_ERASING;

	adr += chip->start;

	send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
	send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);

	timeo = jiffies + (HZ * 20);

	spin_unlock_bh(chip->mutex);
	msleep(1000);
	spin_lock_bh(chip->mutex);

	while (flash_is_busy(map, adr, private->interleave)) {

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			printk(KERN_INFO "%s: erase suspended. Sleeping\n",
			       map->name);
			schedule();
			remove_wait_queue(&chip->wq, &wait);

			if (signal_pending(current)) {
				return -EINTR;
			}

			timeo = jiffies + (HZ*2); /* FIXME */
			spin_lock_bh(chip->mutex);
		}

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING "%s: waiting for erase to complete "
			       "timed out.\n", map->name);

			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);

		schedule();

		spin_lock_bh(chip->mutex);
	}

	/* Verify every single word */
	{
		int address;
		int error = 0;
		__u8 verify;

		for (address = adr; address < (adr + size); address++) {
			if ((verify = map_read8(map, address)) != 0xFF) {
				error = 1;
				break;
			}
		}
		if (error) {
			chip->state = FL_READY;
			spin_unlock_bh(chip->mutex);
			printk(KERN_WARNING
			       "%s: verify error at 0x%x, size %ld.\n",
			       map->name, address, size);

			return -EIO;
		}
	}

	chip->state = FL_READY;
	wake_up(&chip->wq);
	spin_unlock_bh(chip->mutex);

	return 0;
}

static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum;
	int ret = 0;
	int i;
	int first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (instr->addr > mtd->size) {
		return -EINVAL;
	}

	if ((instr->len + instr->addr) > mtd->size) {
		return -EINVAL;
	}

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while ((i < mtd->numeraseregions) &&
	       (instr->addr >= regions[i].offset)) {
		i++;
	}
	i--;

	/* OK, now i is pointing at the erase region in which this
	 * erase request starts. Check the start of the requested
	 * erase range is aligned with the erase size which is in
	 * effect here.
	 */

	if (instr->addr & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	/* Remember the erase region we start on. */

	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while ((i < mtd->numeraseregions) &&
	       ((instr->addr + instr->len) >= regions[i].offset)) {
		i++;
	}

	/* As before, drop back one to point at the region in which
	 * the address actually falls.
	 */

	i--;

	if ((instr->addr + instr->len) & (regions[i].erasesize-1)) {
		return -EINVAL;
	}

	chipnum = instr->addr >> private->chipshift;
	adr = instr->addr - (chipnum << private->chipshift);
	len = instr->len;
	i = first;

	while (len) {
		ret = erase_one_block(map, &private->chips[chipnum], adr,
				      regions[i].erasesize);

		if (ret) {
			return ret;
		}

		adr += regions[i].erasesize;
		len -= regions[i].erasesize;

		if ((adr % (1 << private->chipshift)) ==
		    ((regions[i].offset + (regions[i].erasesize *
					   regions[i].numblocks))
		     % (1 << private->chipshift))) {
			i++;
		}

		if (adr >> private->chipshift) {
			adr = 0;
			chipnum++;
			if (chipnum >= private->numchips) {
				break;
			}
		}
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void amd_flash_sync(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i = 0; !ret && (i < private->numchips); i++) {
		chip = &private->chips[i];

	retry:
		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			spin_unlock_bh(chip->mutex);
			break;

		default:
			/* Not an idle state */
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >= 0; i--) {
		chip = &private->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock_bh(chip->mutex);
	}
}

static int amd_flash_suspend(struct mtd_info *mtd)
{
	printk("amd_flash_suspend(): not implemented!\n");

	return -EINVAL;
}

static void amd_flash_resume(struct mtd_info *mtd)
{
	printk("amd_flash_resume(): not implemented!\n");
}

static void amd_flash_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct amd_flash_private *private = map->fldrv_priv;
	kfree(private);
}

int __init amd_flash_init(void)
{
	register_mtd_chip_driver(&amd_flash_chipdrv);
	return 0;
}

void __exit amd_flash_exit(void)
{
	unregister_mtd_chip_driver(&amd_flash_chipdrv);
}

module_init(amd_flash_init);
module_exit(amd_flash_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");