/*
 * Common Flash Interface support:
 * Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/compatmac.h>
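
/*
 * Check whether the chip at @base is answering in CFI query mode by
 * reading query offsets 0x10-0x12 (scaled by interleave and device
 * width) and comparing them with the expected 'Q', 'R', 'Y' pattern.
 * Returns 1 if the "QRY" signature is present, 0 otherwise.
 */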
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
	map_word val[3];
	map_word qry[3];

	qry[0] = cfi_build_cmd('Q', map, cfi);
	qry[1] = cfi_build_cmd('R', map, cfi);
	qry[2] = cfi_build_cmd('Y', map, cfi);

	val[0] = map_read(map, base + osf*0x10);
	val[1] = map_read(map, base + osf*0x11);
	val[2] = map_read(map, base + osf*0x12);

	if (!map_word_equal(map, qry[0], val[0]))
		return 0;

	if (!map_word_equal(map, qry[1], val[1]))
		return 0;

	if (!map_word_equal(map, qry[2], val[2]))
		return 0;

	return 1;	/* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);
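
/*
 * Put the chip at @base into CFI query mode.  The standard reset +
 * query (0x98 at 0x55) sequence is tried first, then two fallback
 * sequences for chips that do not answer it.  Returns 1 if the "QRY"
 * signature becomes visible, 0 otherwise.
 */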
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found: probably we are dealing with some odd CFI chips */
	/* Some revisions of some old Intel chips? */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);
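
/*
 * Return the chip at @base to read array mode after a query, issuing
 * both the AMD-style (0xF0) and Intel-style (0xFF) reset commands so
 * that either command set recovers.
 */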
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* M29W128G flashes require an additional reset command
	   when exiting query mode */
	if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
		cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);
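
/*
 * Read a vendor-specific (primary or alternate) extended query table of
 * @size bytes starting at query address @adr.  The chip is switched into
 * query mode for the duration of the read and returned to array mode
 * afterwards.  Returns a kmalloc()ed copy of the table, or NULL on
 * failure; the caller is responsible for freeing it.
 */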
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size,
		      const char *name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; // cfi->chips[0].start;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
	if (!adr)
		goto out;

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp) {
		printk(KERN_ERR "Failed to allocate memory\n");
		goto out;
	}

#ifdef CONFIG_MTD_XIP
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i = 0; i < size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base + ((adr + i) * ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

out:
	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);
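
/*
 * Walk a NULL-terminated table of fixups and call every entry whose
 * manufacturer and device id match the probed chip (or which uses the
 * CFI_MFR_ANY / CFI_ID_ANY wildcards).  A purely illustrative table,
 * with a hypothetical handler fixup_fn, might look like:
 *
 *	static struct cfi_fixup my_fixups[] = {
 *		{ CFI_MFR_ANY, CFI_ID_ANY, fixup_fn, NULL },
 *		{ 0, 0, NULL, NULL }
 *	};
 *
 * Real tables are defined by the command-set drivers.
 */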
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = fixups; f->fixup; f++) {
		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
			f->fixup(mtd, f->param);
		}
	}
}

EXPORT_SYMBOL(cfi_fixup);
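
/*
 * Apply the callback @frob to every erase block in the range
 * [@ofs, @ofs + @len) of a chip with variable-size erase regions.
 * Both ends of the range must be aligned to the erasesize of the
 * region they fall in, otherwise -EINVAL is returned.  The walk over
 * erase regions and chips is done here, so the callback only ever
 * sees one block at a time.
 */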
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (ofs > mtd->size)
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which end before the start of the
	   requested erase. Actually, to save on the calculations, we
	   skip to the first erase region which starts after the start
	   of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check that the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize - 1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i < mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize - 1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i = first;

	while (len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);
MODULE_LICENSE("GPL");