/*
 * MTD map driver for AMD compatible flash chips (non-CFI)
 *
 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
 *
 * $Id: amd_flash.c,v 1.27 2005/02/04 07:43:09 jonashg Exp $
 *
 * Copyright (c) 2001 Axis Communications AB
 *
 * This file is under GPL.
 */
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/errno.h>
19 #include <linux/slab.h>
20 #include <linux/delay.h>
21 #include <linux/interrupt.h>
22 #include <linux/init.h>
23 #include <linux/mtd/map.h>
24 #include <linux/mtd/mtd.h>
25 #include <linux/mtd/flashchip.h>
27 /* There's no limit. It exists only to avoid realloc. */
28 #define MAX_AMD_CHIPS 8
30 #define DEVICE_TYPE_X8 (8 / 8)
31 #define DEVICE_TYPE_X16 (16 / 8)
32 #define DEVICE_TYPE_X32 (32 / 8)
35 #define ADDR_MANUFACTURER 0x0000
36 #define ADDR_DEVICE_ID 0x0001
37 #define ADDR_SECTOR_LOCK 0x0002
38 #define ADDR_HANDSHAKE 0x0003
39 #define ADDR_UNLOCK_1 0x0555
40 #define ADDR_UNLOCK_2 0x02AA
43 #define CMD_UNLOCK_DATA_1 0x00AA
44 #define CMD_UNLOCK_DATA_2 0x0055
45 #define CMD_MANUFACTURER_UNLOCK_DATA 0x0090
46 #define CMD_UNLOCK_BYPASS_MODE 0x0020
47 #define CMD_PROGRAM_UNLOCK_DATA 0x00A0
48 #define CMD_RESET_DATA 0x00F0
49 #define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080
50 #define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030
52 #define CMD_UNLOCK_SECTOR 0x0060
55 #define MANUFACTURER_AMD 0x0001
56 #define MANUFACTURER_ATMEL 0x001F
57 #define MANUFACTURER_FUJITSU 0x0004
58 #define MANUFACTURER_ST 0x0020
59 #define MANUFACTURER_SST 0x00BF
60 #define MANUFACTURER_TOSHIBA 0x0098
63 #define AM29F800BB 0x2258
64 #define AM29F800BT 0x22D6
65 #define AM29LV800BB 0x225B
66 #define AM29LV800BT 0x22DA
67 #define AM29LV160DT 0x22C4
68 #define AM29LV160DB 0x2249
69 #define AM29BDS323D 0x22D1
72 #define AT49xV16x 0x00C0
73 #define AT49xV16xT 0x00C2
76 #define MBM29LV160TE 0x22C4
77 #define MBM29LV160BE 0x2249
78 #define MBM29LV800BB 0x225B
81 #define M29W800T 0x00D7
82 #define M29W160DT 0x22C4
83 #define M29W160DB 0x2249
86 #define SST39LF800 0x2781
87 #define SST39LF160 0x2782
90 #define TC58FVT160 0x00C2
91 #define TC58FVB160 0x0043
95 struct amd_flash_private
{
99 unsigned long chipshift
;
100 // const char *im_name;
101 struct flchip chips
[0];
104 struct amd_flash_info
{
109 const int numeraseregions
;
110 const struct mtd_erase_region_info regions
[4];
115 static int amd_flash_read(struct mtd_info
*, loff_t
, size_t, size_t *,
117 static int amd_flash_write(struct mtd_info
*, loff_t
, size_t, size_t *,
119 static int amd_flash_erase(struct mtd_info
*, struct erase_info
*);
120 static void amd_flash_sync(struct mtd_info
*);
121 static int amd_flash_suspend(struct mtd_info
*);
122 static void amd_flash_resume(struct mtd_info
*);
123 static void amd_flash_destroy(struct mtd_info
*);
124 static struct mtd_info
*amd_flash_probe(struct map_info
*map
);
127 static struct mtd_chip_driver amd_flash_chipdrv
= {
128 .probe
= amd_flash_probe
,
129 .destroy
= amd_flash_destroy
,
131 .module
= THIS_MODULE
/* Name under which this chip driver identifies itself. */
static const char im_name[] = "amd_flash";
140 static inline __u32
wide_read(struct map_info
*map
, __u32 addr
)
142 if (map
->buswidth
== 1) {
143 return map_read8(map
, addr
);
144 } else if (map
->buswidth
== 2) {
145 return map_read16(map
, addr
);
146 } else if (map
->buswidth
== 4) {
147 return map_read32(map
, addr
);
153 static inline void wide_write(struct map_info
*map
, __u32 val
, __u32 addr
)
155 if (map
->buswidth
== 1) {
156 map_write8(map
, val
, addr
);
157 } else if (map
->buswidth
== 2) {
158 map_write16(map
, val
, addr
);
159 } else if (map
->buswidth
== 4) {
160 map_write32(map
, val
, addr
);
164 static inline __u32
make_cmd(struct map_info
*map
, __u32 cmd
)
166 const struct amd_flash_private
*private = map
->fldrv_priv
;
167 if ((private->interleave
== 2) &&
168 (private->device_type
== DEVICE_TYPE_X16
)) {
175 static inline void send_unlock(struct map_info
*map
, unsigned long base
)
177 wide_write(map
, (CMD_UNLOCK_DATA_1
<< 16) | CMD_UNLOCK_DATA_1
,
178 base
+ (map
->buswidth
* ADDR_UNLOCK_1
));
179 wide_write(map
, (CMD_UNLOCK_DATA_2
<< 16) | CMD_UNLOCK_DATA_2
,
180 base
+ (map
->buswidth
* ADDR_UNLOCK_2
));
183 static inline void send_cmd(struct map_info
*map
, unsigned long base
, __u32 cmd
)
185 send_unlock(map
, base
);
186 wide_write(map
, make_cmd(map
, cmd
),
187 base
+ (map
->buswidth
* ADDR_UNLOCK_1
));
190 static inline void send_cmd_to_addr(struct map_info
*map
, unsigned long base
,
191 __u32 cmd
, unsigned long addr
)
193 send_unlock(map
, base
);
194 wide_write(map
, make_cmd(map
, cmd
), addr
);
197 static inline int flash_is_busy(struct map_info
*map
, unsigned long addr
,
201 if ((interleave
== 2) && (map
->buswidth
== 4)) {
204 read1
= wide_read(map
, addr
);
205 read2
= wide_read(map
, addr
);
207 return (((read1
>> 16) & D6_MASK
) !=
208 ((read2
>> 16) & D6_MASK
)) ||
209 (((read1
& 0xffff) & D6_MASK
) !=
210 ((read2
& 0xffff) & D6_MASK
));
213 return ((wide_read(map
, addr
) & D6_MASK
) !=
214 (wide_read(map
, addr
) & D6_MASK
));
217 static inline void unlock_sector(struct map_info
*map
, unsigned long sect_addr
,
220 /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */
222 (sect_addr
| (0x40 * map
->buswidth
)) :
223 (sect_addr
& ~(0x40 * map
->buswidth
)) ;
225 __u32 cmd
= make_cmd(map
, CMD_UNLOCK_SECTOR
);
227 wide_write(map
, make_cmd(map
, CMD_RESET_DATA
), 0);
228 wide_write(map
, cmd
, SLA
); /* 1st cycle: write cmd to any address */
229 wide_write(map
, cmd
, SLA
); /* 2nd cycle: write cmd to any address */
230 wide_write(map
, cmd
, SLA
); /* 3rd cycle: write cmd to SLA */
233 static inline int is_sector_locked(struct map_info
*map
,
234 unsigned long sect_addr
)
238 wide_write(map
, CMD_RESET_DATA
, 0);
239 send_cmd(map
, sect_addr
, CMD_MANUFACTURER_UNLOCK_DATA
);
241 /* status is 0x0000 for unlocked and 0x0001 for locked */
242 status
= wide_read(map
, sect_addr
+ (map
->buswidth
* ADDR_SECTOR_LOCK
));
243 wide_write(map
, CMD_RESET_DATA
, 0);
247 static int amd_flash_do_unlock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
,
250 struct map_info
*map
;
251 struct mtd_erase_region_info
*merip
;
252 int eraseoffset
, erasesize
, eraseblocks
;
259 /* Pass the whole chip through sector by sector and check for each
260 sector if the sector and the given interval overlap */
261 for(i
= 0; i
< mtd
->numeraseregions
; i
++) {
262 merip
= &mtd
->eraseregions
[i
];
264 eraseoffset
= merip
->offset
;
265 erasesize
= merip
->erasesize
;
266 eraseblocks
= merip
->numblocks
;
268 if (ofs
> eraseoffset
+ erasesize
)
271 while (eraseblocks
> 0) {
272 if (ofs
< eraseoffset
+ erasesize
&& ofs
+ len
> eraseoffset
) {
273 unlock_sector(map
, eraseoffset
, is_unlock
);
275 lock_status
= is_sector_locked(map
, eraseoffset
);
277 if (is_unlock
&& lock_status
) {
278 printk("Cannot unlock sector at address %x length %xx\n",
279 eraseoffset
, merip
->erasesize
);
281 } else if (!is_unlock
&& !lock_status
) {
282 printk("Cannot lock sector at address %x length %x\n",
283 eraseoffset
, merip
->erasesize
);
287 eraseoffset
+= erasesize
;
294 static int amd_flash_unlock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
296 return amd_flash_do_unlock(mtd
, ofs
, len
, 1);
299 static int amd_flash_lock(struct mtd_info
*mtd
, loff_t ofs
, size_t len
)
301 return amd_flash_do_unlock(mtd
, ofs
, len
, 0);
306 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
307 * matching table entry (-1 if not found or alias for already found chip).
309 static int probe_new_chip(struct mtd_info
*mtd
, __u32 base
,
310 struct flchip
*chips
,
311 struct amd_flash_private
*private,
312 const struct amd_flash_info
*table
, int table_size
)
316 struct map_info
*map
= mtd
->priv
;
317 struct amd_flash_private temp
;
320 temp
.device_type
= DEVICE_TYPE_X16
; // Assume X16 (FIXME)
322 map
->fldrv_priv
= &temp
;
324 /* Enter autoselect mode. */
325 send_cmd(map
, base
, CMD_RESET_DATA
);
326 send_cmd(map
, base
, CMD_MANUFACTURER_UNLOCK_DATA
);
328 mfr_id
= wide_read(map
, base
+ (map
->buswidth
* ADDR_MANUFACTURER
));
329 dev_id
= wide_read(map
, base
+ (map
->buswidth
* ADDR_DEVICE_ID
));
331 if ((map
->buswidth
== 4) && ((mfr_id
>> 16) == (mfr_id
& 0xffff)) &&
332 ((dev_id
>> 16) == (dev_id
& 0xffff))) {
339 for (i
= 0; i
< table_size
; i
++) {
340 if ((mfr_id
== table
[i
].mfr_id
) &&
341 (dev_id
== table
[i
].dev_id
)) {
345 /* Is this an alias for an already found chip?
346 * In that case that chip should be in
347 * autoselect mode now.
349 for (j
= 0; j
< private->numchips
; j
++) {
354 wide_read(map
, chips
[j
].start
+
359 wide_read(map
, chips
[j
].start
+
362 if (temp
.interleave
== 2) {
363 mfr_id_other
&= 0xffff;
364 dev_id_other
&= 0xffff;
366 if ((mfr_id_other
== mfr_id
) &&
367 (dev_id_other
== dev_id
)) {
369 /* Exit autoselect mode. */
377 if (private->numchips
== MAX_AMD_CHIPS
) {
379 "%s: Too many flash chips "
380 "detected. Increase "
381 "MAX_AMD_CHIPS from %d.\n",
382 map
->name
, MAX_AMD_CHIPS
);
387 chips
[private->numchips
].start
= base
;
388 chips
[private->numchips
].state
= FL_READY
;
389 chips
[private->numchips
].mutex
=
390 &chips
[private->numchips
]._spinlock
;
394 printk("%s: Found %d x %ldMiB %s at 0x%x\n", map
->name
,
395 temp
.interleave
, (table
[i
].size
)/(1024*1024),
396 table
[i
].name
, base
);
398 mtd
->size
+= table
[i
].size
* temp
.interleave
;
399 mtd
->numeraseregions
+= table
[i
].numeraseregions
;
405 /* Exit autoselect mode. */
406 send_cmd(map
, base
, CMD_RESET_DATA
);
408 if (i
== table_size
) {
409 printk(KERN_DEBUG
"%s: unknown flash device at 0x%x, "
410 "mfr id 0x%x, dev id 0x%x\n", map
->name
,
411 base
, mfr_id
, dev_id
);
412 map
->fldrv_priv
= NULL
;
417 private->device_type
= temp
.device_type
;
418 private->interleave
= temp
.interleave
;
425 static struct mtd_info
*amd_flash_probe(struct map_info
*map
)
427 static const struct amd_flash_info table
[] = {
429 .mfr_id
= MANUFACTURER_AMD
,
430 .dev_id
= AM29LV160DT
,
431 .name
= "AMD AM29LV160DT",
433 .numeraseregions
= 4,
435 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
436 { .offset
= 0x1F0000, .erasesize
= 0x08000, .numblocks
= 1 },
437 { .offset
= 0x1F8000, .erasesize
= 0x02000, .numblocks
= 2 },
438 { .offset
= 0x1FC000, .erasesize
= 0x04000, .numblocks
= 1 }
441 .mfr_id
= MANUFACTURER_AMD
,
442 .dev_id
= AM29LV160DB
,
443 .name
= "AMD AM29LV160DB",
445 .numeraseregions
= 4,
447 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
448 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
449 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
450 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
453 .mfr_id
= MANUFACTURER_TOSHIBA
,
454 .dev_id
= TC58FVT160
,
455 .name
= "Toshiba TC58FVT160",
457 .numeraseregions
= 4,
459 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
460 { .offset
= 0x1F0000, .erasesize
= 0x08000, .numblocks
= 1 },
461 { .offset
= 0x1F8000, .erasesize
= 0x02000, .numblocks
= 2 },
462 { .offset
= 0x1FC000, .erasesize
= 0x04000, .numblocks
= 1 }
465 .mfr_id
= MANUFACTURER_FUJITSU
,
466 .dev_id
= MBM29LV160TE
,
467 .name
= "Fujitsu MBM29LV160TE",
469 .numeraseregions
= 4,
471 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
472 { .offset
= 0x1F0000, .erasesize
= 0x08000, .numblocks
= 1 },
473 { .offset
= 0x1F8000, .erasesize
= 0x02000, .numblocks
= 2 },
474 { .offset
= 0x1FC000, .erasesize
= 0x04000, .numblocks
= 1 }
477 .mfr_id
= MANUFACTURER_TOSHIBA
,
478 .dev_id
= TC58FVB160
,
479 .name
= "Toshiba TC58FVB160",
481 .numeraseregions
= 4,
483 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
484 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
485 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
486 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
489 .mfr_id
= MANUFACTURER_FUJITSU
,
490 .dev_id
= MBM29LV160BE
,
491 .name
= "Fujitsu MBM29LV160BE",
493 .numeraseregions
= 4,
495 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
496 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
497 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
498 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
501 .mfr_id
= MANUFACTURER_AMD
,
502 .dev_id
= AM29LV800BB
,
503 .name
= "AMD AM29LV800BB",
505 .numeraseregions
= 4,
507 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
508 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
509 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
510 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 15 }
513 .mfr_id
= MANUFACTURER_AMD
,
514 .dev_id
= AM29F800BB
,
515 .name
= "AMD AM29F800BB",
517 .numeraseregions
= 4,
519 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
520 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
521 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
522 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 15 }
525 .mfr_id
= MANUFACTURER_AMD
,
526 .dev_id
= AM29LV800BT
,
527 .name
= "AMD AM29LV800BT",
529 .numeraseregions
= 4,
531 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 15 },
532 { .offset
= 0x0F0000, .erasesize
= 0x08000, .numblocks
= 1 },
533 { .offset
= 0x0F8000, .erasesize
= 0x02000, .numblocks
= 2 },
534 { .offset
= 0x0FC000, .erasesize
= 0x04000, .numblocks
= 1 }
537 .mfr_id
= MANUFACTURER_AMD
,
538 .dev_id
= AM29F800BT
,
539 .name
= "AMD AM29F800BT",
541 .numeraseregions
= 4,
543 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 15 },
544 { .offset
= 0x0F0000, .erasesize
= 0x08000, .numblocks
= 1 },
545 { .offset
= 0x0F8000, .erasesize
= 0x02000, .numblocks
= 2 },
546 { .offset
= 0x0FC000, .erasesize
= 0x04000, .numblocks
= 1 }
549 .mfr_id
= MANUFACTURER_AMD
,
550 .dev_id
= AM29LV800BB
,
551 .name
= "AMD AM29LV800BB",
553 .numeraseregions
= 4,
555 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 15 },
556 { .offset
= 0x0F0000, .erasesize
= 0x08000, .numblocks
= 1 },
557 { .offset
= 0x0F8000, .erasesize
= 0x02000, .numblocks
= 2 },
558 { .offset
= 0x0FC000, .erasesize
= 0x04000, .numblocks
= 1 }
561 .mfr_id
= MANUFACTURER_FUJITSU
,
562 .dev_id
= MBM29LV800BB
,
563 .name
= "Fujitsu MBM29LV800BB",
565 .numeraseregions
= 4,
567 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
568 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
569 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
570 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 15 }
573 .mfr_id
= MANUFACTURER_ST
,
575 .name
= "ST M29W800T",
577 .numeraseregions
= 4,
579 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 15 },
580 { .offset
= 0x0F0000, .erasesize
= 0x08000, .numblocks
= 1 },
581 { .offset
= 0x0F8000, .erasesize
= 0x02000, .numblocks
= 2 },
582 { .offset
= 0x0FC000, .erasesize
= 0x04000, .numblocks
= 1 }
585 .mfr_id
= MANUFACTURER_ST
,
587 .name
= "ST M29W160DT",
589 .numeraseregions
= 4,
591 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
592 { .offset
= 0x1F0000, .erasesize
= 0x08000, .numblocks
= 1 },
593 { .offset
= 0x1F8000, .erasesize
= 0x02000, .numblocks
= 2 },
594 { .offset
= 0x1FC000, .erasesize
= 0x04000, .numblocks
= 1 }
597 .mfr_id
= MANUFACTURER_ST
,
599 .name
= "ST M29W160DB",
601 .numeraseregions
= 4,
603 { .offset
= 0x000000, .erasesize
= 0x04000, .numblocks
= 1 },
604 { .offset
= 0x004000, .erasesize
= 0x02000, .numblocks
= 2 },
605 { .offset
= 0x008000, .erasesize
= 0x08000, .numblocks
= 1 },
606 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
609 .mfr_id
= MANUFACTURER_AMD
,
610 .dev_id
= AM29BDS323D
,
611 .name
= "AMD AM29BDS323D",
613 .numeraseregions
= 3,
615 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 48 },
616 { .offset
= 0x300000, .erasesize
= 0x10000, .numblocks
= 15 },
617 { .offset
= 0x3f0000, .erasesize
= 0x02000, .numblocks
= 8 },
620 .mfr_id
= MANUFACTURER_ATMEL
,
622 .name
= "Atmel AT49xV16x",
624 .numeraseregions
= 2,
626 { .offset
= 0x000000, .erasesize
= 0x02000, .numblocks
= 8 },
627 { .offset
= 0x010000, .erasesize
= 0x10000, .numblocks
= 31 }
630 .mfr_id
= MANUFACTURER_ATMEL
,
631 .dev_id
= AT49xV16xT
,
632 .name
= "Atmel AT49xV16xT",
634 .numeraseregions
= 2,
636 { .offset
= 0x000000, .erasesize
= 0x10000, .numblocks
= 31 },
637 { .offset
= 0x1F0000, .erasesize
= 0x02000, .numblocks
= 8 }
642 struct mtd_info
*mtd
;
643 struct flchip chips
[MAX_AMD_CHIPS
];
644 int table_pos
[MAX_AMD_CHIPS
];
645 struct amd_flash_private temp
;
646 struct amd_flash_private
*private;
653 mtd
= (struct mtd_info
*)kmalloc(sizeof(*mtd
), GFP_KERNEL
);
656 "%s: kmalloc failed for info structure\n", map
->name
);
659 memset(mtd
, 0, sizeof(*mtd
));
662 memset(&temp
, 0, sizeof(temp
));
664 printk("%s: Probing for AMD compatible flash...\n", map
->name
);
666 if ((table_pos
[0] = probe_new_chip(mtd
, 0, NULL
, &temp
, table
,
667 sizeof(table
)/sizeof(table
[0])))
670 "%s: Found no AMD compatible device at location zero\n",
678 chips
[0].state
= FL_READY
;
679 chips
[0].mutex
= &chips
[0]._spinlock
;
681 for (size
= mtd
->size
; size
> 1; size
>>= 1) {
684 switch (temp
.interleave
) {
693 /* Find out if there are any more chips in the map. */
694 for (base
= (1 << temp
.chipshift
);
696 base
+= (1 << temp
.chipshift
)) {
697 int numchips
= temp
.numchips
;
698 table_pos
[numchips
] = probe_new_chip(mtd
, base
, chips
,
699 &temp
, table
, sizeof(table
)/sizeof(table
[0]));
702 mtd
->eraseregions
= kmalloc(sizeof(struct mtd_erase_region_info
) *
703 mtd
->numeraseregions
, GFP_KERNEL
);
704 if (!mtd
->eraseregions
) {
705 printk(KERN_WARNING
"%s: Failed to allocate "
706 "memory for MTD erase region info\n", map
->name
);
708 map
->fldrv_priv
= NULL
;
714 for (i
= 0; i
< temp
.numchips
; i
++) {
719 for (j
= 0; j
< table
[table_pos
[i
]].numeraseregions
; j
++) {
720 mtd
->eraseregions
[reg_idx
].offset
= offset
+
721 (table
[table_pos
[i
]].regions
[j
].offset
*
723 mtd
->eraseregions
[reg_idx
].erasesize
=
724 table
[table_pos
[i
]].regions
[j
].erasesize
*
726 mtd
->eraseregions
[reg_idx
].numblocks
=
727 table
[table_pos
[i
]].regions
[j
].numblocks
;
729 mtd
->eraseregions
[reg_idx
].erasesize
) {
731 mtd
->eraseregions
[reg_idx
].erasesize
;
733 dev_size
+= mtd
->eraseregions
[reg_idx
].erasesize
*
734 mtd
->eraseregions
[reg_idx
].numblocks
;
739 mtd
->type
= MTD_NORFLASH
;
740 mtd
->flags
= MTD_CAP_NORFLASH
;
741 mtd
->name
= map
->name
;
742 mtd
->erase
= amd_flash_erase
;
743 mtd
->read
= amd_flash_read
;
744 mtd
->write
= amd_flash_write
;
745 mtd
->sync
= amd_flash_sync
;
746 mtd
->suspend
= amd_flash_suspend
;
747 mtd
->resume
= amd_flash_resume
;
748 mtd
->lock
= amd_flash_lock
;
749 mtd
->unlock
= amd_flash_unlock
;
751 private = kmalloc(sizeof(*private) + (sizeof(struct flchip
) *
752 temp
.numchips
), GFP_KERNEL
);
755 "%s: kmalloc failed for private structure\n", map
->name
);
757 map
->fldrv_priv
= NULL
;
760 memcpy(private, &temp
, sizeof(temp
));
761 memcpy(private->chips
, chips
,
762 sizeof(struct flchip
) * private->numchips
);
763 for (i
= 0; i
< private->numchips
; i
++) {
764 init_waitqueue_head(&private->chips
[i
].wq
);
765 spin_lock_init(&private->chips
[i
]._spinlock
);
768 map
->fldrv_priv
= private;
770 map
->fldrv
= &amd_flash_chipdrv
;
772 __module_get(THIS_MODULE
);
778 static inline int read_one_chip(struct map_info
*map
, struct flchip
*chip
,
779 loff_t adr
, size_t len
, u_char
*buf
)
781 DECLARE_WAITQUEUE(wait
, current
);
782 unsigned long timeo
= jiffies
+ HZ
;
785 spin_lock_bh(chip
->mutex
);
787 if (chip
->state
!= FL_READY
){
788 printk(KERN_INFO
"%s: waiting for chip to read, state = %d\n",
789 map
->name
, chip
->state
);
790 set_current_state(TASK_UNINTERRUPTIBLE
);
791 add_wait_queue(&chip
->wq
, &wait
);
793 spin_unlock_bh(chip
->mutex
);
796 remove_wait_queue(&chip
->wq
, &wait
);
798 if(signal_pending(current
)) {
802 timeo
= jiffies
+ HZ
;
809 chip
->state
= FL_READY
;
811 map_copy_from(map
, buf
, adr
, len
);
814 spin_unlock_bh(chip
->mutex
);
821 static int amd_flash_read(struct mtd_info
*mtd
, loff_t from
, size_t len
,
822 size_t *retlen
, u_char
*buf
)
824 struct map_info
*map
= mtd
->priv
;
825 struct amd_flash_private
*private = map
->fldrv_priv
;
830 if ((from
+ len
) > mtd
->size
) {
831 printk(KERN_WARNING
"%s: read request past end of device "
832 "(0x%lx)\n", map
->name
, (unsigned long)from
+ len
);
837 /* Offset within the first chip that the first read should start. */
838 chipnum
= (from
>> private->chipshift
);
839 ofs
= from
- (chipnum
<< private->chipshift
);
844 unsigned long this_len
;
846 if (chipnum
>= private->numchips
) {
850 if ((len
+ ofs
- 1) >> private->chipshift
) {
851 this_len
= (1 << private->chipshift
) - ofs
;
856 ret
= read_one_chip(map
, &private->chips
[chipnum
], ofs
,
875 static int write_one_word(struct map_info
*map
, struct flchip
*chip
,
876 unsigned long adr
, __u32 datum
)
878 unsigned long timeo
= jiffies
+ HZ
;
879 struct amd_flash_private
*private = map
->fldrv_priv
;
880 DECLARE_WAITQUEUE(wait
, current
);
885 spin_lock_bh(chip
->mutex
);
887 if (chip
->state
!= FL_READY
){
888 printk("%s: waiting for chip to write, state = %d\n",
889 map
->name
, chip
->state
);
890 set_current_state(TASK_UNINTERRUPTIBLE
);
891 add_wait_queue(&chip
->wq
, &wait
);
893 spin_unlock_bh(chip
->mutex
);
896 remove_wait_queue(&chip
->wq
, &wait
);
897 printk(KERN_INFO
"%s: woke up to write\n", map
->name
);
898 if(signal_pending(current
))
901 timeo
= jiffies
+ HZ
;
906 chip
->state
= FL_WRITING
;
910 send_cmd(map
, chip
->start
, CMD_PROGRAM_UNLOCK_DATA
);
911 wide_write(map
, datum
, adr
);
914 while (times_left
-- && flash_is_busy(map
, adr
, private->interleave
)) {
915 if (need_resched()) {
916 spin_unlock_bh(chip
->mutex
);
918 spin_lock_bh(chip
->mutex
);
923 printk(KERN_WARNING
"%s: write to 0x%lx timed out!\n",
928 if ((verify
= wide_read(map
, adr
)) != datum
) {
929 printk(KERN_WARNING
"%s: write to 0x%lx failed. "
930 "datum = %x, verify = %x\n",
931 map
->name
, adr
, datum
, verify
);
937 chip
->state
= FL_READY
;
939 spin_unlock_bh(chip
->mutex
);
946 static int amd_flash_write(struct mtd_info
*mtd
, loff_t to
, size_t len
,
947 size_t *retlen
, const u_char
*buf
)
949 struct map_info
*map
= mtd
->priv
;
950 struct amd_flash_private
*private = map
->fldrv_priv
;
954 unsigned long chipstart
;
961 chipnum
= to
>> private->chipshift
;
962 ofs
= to
- (chipnum
<< private->chipshift
);
963 chipstart
= private->chips
[chipnum
].start
;
965 /* If it's not bus-aligned, do the first byte write. */
966 if (ofs
& (map
->buswidth
- 1)) {
967 unsigned long bus_ofs
= ofs
& ~(map
->buswidth
- 1);
968 int i
= ofs
- bus_ofs
;
973 map_copy_from(map
, tmp_buf
,
974 bus_ofs
+ private->chips
[chipnum
].start
,
976 while (len
&& i
< map
->buswidth
)
977 tmp_buf
[i
++] = buf
[n
++], len
--;
979 if (map
->buswidth
== 2) {
980 datum
= *(__u16
*)tmp_buf
;
981 } else if (map
->buswidth
== 4) {
982 datum
= *(__u32
*)tmp_buf
;
984 return -EINVAL
; /* should never happen, but be safe */
987 ret
= write_one_word(map
, &private->chips
[chipnum
], bus_ofs
,
997 if (ofs
>> private->chipshift
) {
1000 if (chipnum
== private->numchips
) {
1006 /* We are now aligned, write as much as possible. */
1007 while(len
>= map
->buswidth
) {
1010 if (map
->buswidth
== 1) {
1011 datum
= *(__u8
*)buf
;
1012 } else if (map
->buswidth
== 2) {
1013 datum
= *(__u16
*)buf
;
1014 } else if (map
->buswidth
== 4) {
1015 datum
= *(__u32
*)buf
;
1020 ret
= write_one_word(map
, &private->chips
[chipnum
], ofs
, datum
);
1026 ofs
+= map
->buswidth
;
1027 buf
+= map
->buswidth
;
1028 (*retlen
) += map
->buswidth
;
1029 len
-= map
->buswidth
;
1031 if (ofs
>> private->chipshift
) {
1034 if (chipnum
== private->numchips
) {
1037 chipstart
= private->chips
[chipnum
].start
;
1041 if (len
& (map
->buswidth
- 1)) {
1046 map_copy_from(map
, tmp_buf
,
1047 ofs
+ private->chips
[chipnum
].start
,
1050 tmp_buf
[i
++] = buf
[n
++];
1053 if (map
->buswidth
== 2) {
1054 datum
= *(__u16
*)tmp_buf
;
1055 } else if (map
->buswidth
== 4) {
1056 datum
= *(__u32
*)tmp_buf
;
1058 return -EINVAL
; /* should never happen, but be safe */
1061 ret
= write_one_word(map
, &private->chips
[chipnum
], ofs
, datum
);
1075 static inline int erase_one_block(struct map_info
*map
, struct flchip
*chip
,
1076 unsigned long adr
, u_long size
)
1078 unsigned long timeo
= jiffies
+ HZ
;
1079 struct amd_flash_private
*private = map
->fldrv_priv
;
1080 DECLARE_WAITQUEUE(wait
, current
);
1083 spin_lock_bh(chip
->mutex
);
1085 if (chip
->state
!= FL_READY
){
1086 set_current_state(TASK_UNINTERRUPTIBLE
);
1087 add_wait_queue(&chip
->wq
, &wait
);
1089 spin_unlock_bh(chip
->mutex
);
1092 remove_wait_queue(&chip
->wq
, &wait
);
1094 if (signal_pending(current
)) {
1098 timeo
= jiffies
+ HZ
;
1103 chip
->state
= FL_ERASING
;
1107 send_cmd(map
, chip
->start
, CMD_SECTOR_ERASE_UNLOCK_DATA
);
1108 send_cmd_to_addr(map
, chip
->start
, CMD_SECTOR_ERASE_UNLOCK_DATA_2
, adr
);
1110 timeo
= jiffies
+ (HZ
* 20);
1112 spin_unlock_bh(chip
->mutex
);
1114 spin_lock_bh(chip
->mutex
);
1116 while (flash_is_busy(map
, adr
, private->interleave
)) {
1118 if (chip
->state
!= FL_ERASING
) {
1119 /* Someone's suspended the erase. Sleep */
1120 set_current_state(TASK_UNINTERRUPTIBLE
);
1121 add_wait_queue(&chip
->wq
, &wait
);
1123 spin_unlock_bh(chip
->mutex
);
1124 printk(KERN_INFO
"%s: erase suspended. Sleeping\n",
1127 remove_wait_queue(&chip
->wq
, &wait
);
1129 if (signal_pending(current
)) {
1133 timeo
= jiffies
+ (HZ
*2); /* FIXME */
1134 spin_lock_bh(chip
->mutex
);
1138 /* OK Still waiting */
1139 if (time_after(jiffies
, timeo
)) {
1140 chip
->state
= FL_READY
;
1141 spin_unlock_bh(chip
->mutex
);
1142 printk(KERN_WARNING
"%s: waiting for erase to complete "
1143 "timed out.\n", map
->name
);
1149 /* Latency issues. Drop the lock, wait a while and retry */
1150 spin_unlock_bh(chip
->mutex
);
1157 spin_lock_bh(chip
->mutex
);
1160 /* Verify every single word */
1166 for (address
= adr
; address
< (adr
+ size
); address
++) {
1167 if ((verify
= map_read8(map
, address
)) != 0xFF) {
1173 chip
->state
= FL_READY
;
1174 spin_unlock_bh(chip
->mutex
);
1176 "%s: verify error at 0x%x, size %ld.\n",
1177 map
->name
, address
, size
);
1185 chip
->state
= FL_READY
;
1187 spin_unlock_bh(chip
->mutex
);
1194 static int amd_flash_erase(struct mtd_info
*mtd
, struct erase_info
*instr
)
1196 struct map_info
*map
= mtd
->priv
;
1197 struct amd_flash_private
*private = map
->fldrv_priv
;
1198 unsigned long adr
, len
;
1203 struct mtd_erase_region_info
*regions
= mtd
->eraseregions
;
1205 if (instr
->addr
> mtd
->size
) {
1209 if ((instr
->len
+ instr
->addr
) > mtd
->size
) {
1213 /* Check that both start and end of the requested erase are
1214 * aligned with the erasesize at the appropriate addresses.
1219 /* Skip all erase regions which are ended before the start of
1220 the requested erase. Actually, to save on the calculations,
1221 we skip to the first erase region which starts after the
1222 start of the requested erase, and then go back one.
1225 while ((i
< mtd
->numeraseregions
) &&
1226 (instr
->addr
>= regions
[i
].offset
)) {
1231 /* OK, now i is pointing at the erase region in which this
1232 * erase request starts. Check the start of the requested
1233 * erase range is aligned with the erase size which is in
1237 if (instr
->addr
& (regions
[i
].erasesize
-1)) {
1241 /* Remember the erase region we start on. */
1245 /* Next, check that the end of the requested erase is aligned
1246 * with the erase region at that address.
1249 while ((i
< mtd
->numeraseregions
) &&
1250 ((instr
->addr
+ instr
->len
) >= regions
[i
].offset
)) {
1254 /* As before, drop back one to point at the region in which
1255 * the address actually falls.
1260 if ((instr
->addr
+ instr
->len
) & (regions
[i
].erasesize
-1)) {
1264 chipnum
= instr
->addr
>> private->chipshift
;
1265 adr
= instr
->addr
- (chipnum
<< private->chipshift
);
1271 ret
= erase_one_block(map
, &private->chips
[chipnum
], adr
,
1272 regions
[i
].erasesize
);
1278 adr
+= regions
[i
].erasesize
;
1279 len
-= regions
[i
].erasesize
;
1281 if ((adr
% (1 << private->chipshift
)) ==
1282 ((regions
[i
].offset
+ (regions
[i
].erasesize
*
1283 regions
[i
].numblocks
))
1284 % (1 << private->chipshift
))) {
1288 if (adr
>> private->chipshift
) {
1291 if (chipnum
>= private->numchips
) {
1297 instr
->state
= MTD_ERASE_DONE
;
1298 mtd_erase_callback(instr
);
1305 static void amd_flash_sync(struct mtd_info
*mtd
)
1307 struct map_info
*map
= mtd
->priv
;
1308 struct amd_flash_private
*private = map
->fldrv_priv
;
1310 struct flchip
*chip
;
1312 DECLARE_WAITQUEUE(wait
, current
);
1314 for (i
= 0; !ret
&& (i
< private->numchips
); i
++) {
1315 chip
= &private->chips
[i
];
1318 spin_lock_bh(chip
->mutex
);
1320 switch(chip
->state
) {
1324 case FL_JEDEC_QUERY
:
1325 chip
->oldstate
= chip
->state
;
1326 chip
->state
= FL_SYNCING
;
1327 /* No need to wake_up() on this state change -
1328 * as the whole point is that nobody can do anything
1329 * with the chip now anyway.
1332 spin_unlock_bh(chip
->mutex
);
1336 /* Not an idle state */
1337 add_wait_queue(&chip
->wq
, &wait
);
1339 spin_unlock_bh(chip
->mutex
);
1343 remove_wait_queue(&chip
->wq
, &wait
);
1349 /* Unlock the chips again */
1350 for (i
--; i
>= 0; i
--) {
1351 chip
= &private->chips
[i
];
1353 spin_lock_bh(chip
->mutex
);
1355 if (chip
->state
== FL_SYNCING
) {
1356 chip
->state
= chip
->oldstate
;
1359 spin_unlock_bh(chip
->mutex
);
1365 static int amd_flash_suspend(struct mtd_info
*mtd
)
1367 printk("amd_flash_suspend(): not implemented!\n");
/* Resume hook: not supported by this driver; only logs a notice. */
static void amd_flash_resume(struct mtd_info *mtd)
{
	printk("amd_flash_resume(): not implemented!\n");
}
1380 static void amd_flash_destroy(struct mtd_info
*mtd
)
1382 struct map_info
*map
= mtd
->priv
;
1383 struct amd_flash_private
*private = map
->fldrv_priv
;
1387 int __init
amd_flash_init(void)
1389 register_mtd_chip_driver(&amd_flash_chipdrv
);
1393 void __exit
amd_flash_exit(void)
1395 unregister_mtd_chip_driver(&amd_flash_chipdrv
);
1398 module_init(amd_flash_init
);
1399 module_exit(amd_flash_exit
);
1401 MODULE_LICENSE("GPL");
1402 MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>");
1403 MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips");