/*
 *  linux/fs/hpfs/map.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  mapping structures to memory with some minimal checks
 */

#include "hpfs_fn.h"
__le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
{
	return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
}
__le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
			struct quad_buffer_head *qbh, char *id)
{
	secno sec;
	__le32 *ret;
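	/* One allocation band covers 2^14 sectors: its bitmap is 4 sectors
	   (2048 bytes = 16384 bits), one bit per sector. */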
	unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
	if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) {
		hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
		return NULL;
	}
	sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
	if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) {
		hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id);
		return NULL;
	}
	ret = hpfs_map_4sectors(s, sec, qbh, 4);
	if (ret) hpfs_prefetch_bitmap(s, bmp_block + 1);
	return ret;
}
void hpfs_prefetch_bitmap(struct super_block *s, unsigned bmp_block)
{
	unsigned to_prefetch, next_prefetch;
	unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
	if (unlikely(bmp_block >= n_bands))
		return;
	to_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
	if (unlikely(bmp_block + 1 >= n_bands))
		next_prefetch = 0;
	else
		next_prefetch = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block + 1]);
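	/* Prefetch the 4 sectors of this band's bitmap; if the next band's
	   bitmap immediately follows on disk, prefetch 8 sectors in one go. */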
	hpfs_prefetch_sectors(s, to_prefetch, 4 + 4 * (to_prefetch + 4 == next_prefetch));
}
/*
 * Load first code page into kernel memory, return pointer to 256-byte array,
 * first 128 bytes are uppercasing table for chars 128-255, next 128 bytes are
 * lowercasing table
 */

unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
{
	struct buffer_head *bh;
	secno cpds;
	unsigned cpi;
	unsigned char *ptr;
	unsigned char *cp_table;
	int i;
	struct code_page_data *cpd;
	struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0);
	if (!cp) return NULL;
	if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) {
		pr_err("Code page directory magic doesn't match (magic = %08x)\n",
			le32_to_cpu(cp->magic));
		brelse(bh);
		return NULL;
	}
	if (!le32_to_cpu(cp->n_code_pages)) {
		pr_err("n_code_pages == 0\n");
		brelse(bh);
		return NULL;
	}
	cpds = le32_to_cpu(cp->array[0].code_page_data);
	cpi = le16_to_cpu(cp->array[0].index);
	brelse(bh);

	if (cpi >= 3) {
		pr_err("Code page index out of array\n");
		return NULL;
	}

	if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL;
	if (le16_to_cpu(cpd->offs[cpi]) > 0x178) {
		pr_err("Code page index out of sector\n");
		brelse(bh);
		return NULL;
	}
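	/* The 128-byte uppercasing table appears to start 6 bytes past the
	   per-code-page record that offs[cpi] points at; the check above
	   (offs <= 0x178) keeps header + table inside the 512-byte sector. */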
	ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6;
	if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
		pr_err("out of memory for code page table\n");
		brelse(bh);
		return NULL;
	}
	memcpy(cp_table, ptr, 128);
	brelse(bh);

	/* Try to build lowercasing table from uppercasing one */
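	/* Start with the identity map, then for every char whose uppercase
	   form differs and itself lies in 128-255, record the reverse
	   (uppercase -> lowercase) mapping. */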
	for (i=128; i<256; i++) cp_table[i]=i;
	for (i=128; i<256; i++) if (cp_table[i-128]!=i && cp_table[i-128]>=128)
		cp_table[cp_table[i-128]] = i;

	return cp_table;
}
__le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
{
	struct buffer_head *bh;
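	/* One 512-byte directory sector holds 128 band-bitmap pointers and
	   each band maps 2^14 sectors, so a sector covers 2^21 sectors. */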
	int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
	int i;
	__le32 *b;
	if (!(b = kmalloc(n * 512, GFP_KERNEL))) {
		pr_err("can't allocate memory for bitmap directory\n");
		return NULL;
	}
	for (i=0;i<n;i++) {
		__le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
		if (!d) {
			kfree(b);
			return NULL;
		}
		memcpy((char *)b + 512 * i, d, 512);
		brelse(bh);
	}
	return b;
}
/*
 * Load fnode to memory
 */

struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_head **bhp)
{
	struct fnode *fnode;
	if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ino, 1, "fnode")) {
		return NULL;
	}
	if ((fnode = hpfs_map_sector(s, ino, bhp, FNODE_RD_AHEAD))) {
		if (hpfs_sb(s)->sb_chk) {
			struct extended_attribute *ea;
			struct extended_attribute *ea_end;
			if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) {
				hpfs_error(s, "bad magic on fnode %08lx",
					(unsigned long)ino);
				goto bail;
			}
			if (!fnode_is_dir(fnode)) {
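				/* The fnode's btree area holds 12 internal
				   entries (8 bytes each) or 8 leaf extents
				   (12 bytes each); used + free must equal
				   that capacity. */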
				if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
				    (bp_internal(&fnode->btree) ? 12 : 8)) {
					hpfs_error(s,
					   "bad number of nodes in fnode %08lx",
					    (unsigned long)ino);
					goto bail;
				}
				if (le16_to_cpu(fnode->btree.first_free) !=
				    8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
					hpfs_error(s,
					    "bad first_free pointer in fnode %08lx",
					    (unsigned long)ino);
					goto bail;
				}
			}
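			/* EAs stored in the fnode must lie between the fixed
			   0xc4-byte fnode header and the end of the 512-byte
			   sector. */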
			if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 ||
			   le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) {
				hpfs_error(s,
					"bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
					(unsigned long)ino,
					le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
				goto bail;
			}
			ea = fnode_ea(fnode);
			ea_end = fnode_end_ea(fnode);
			while (ea != ea_end) {
				if (ea > ea_end) {
					hpfs_error(s, "bad EA in fnode %08lx",
						(unsigned long)ino);
					goto bail;
				}
				ea = next_ea(ea);
			}
		}
	}
	return fnode;
	bail:
	brelse(*bhp);
	return NULL;
}
struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buffer_head **bhp)
{
	struct anode *anode;
	if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL;
	if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD)))
		if (hpfs_sb(s)->sb_chk) {
			if (le32_to_cpu(anode->magic) != ANODE_MAGIC) {
				hpfs_error(s, "bad magic on anode %08x", ano);
				goto bail;
			}
			if (le32_to_cpu(anode->self) != ano) {
				hpfs_error(s, "self pointer invalid on anode %08x", ano);
				goto bail;
			}
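			/* An anode's btree area is larger than an fnode's:
			   60 internal entries (8 bytes each) or 40 leaf
			   extents (12 bytes each). */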
			if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
			    (bp_internal(&anode->btree) ? 60 : 40)) {
				hpfs_error(s, "bad number of nodes in anode %08x", ano);
				goto bail;
			}
			if (le16_to_cpu(anode->btree.first_free) !=
			    8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
				hpfs_error(s, "bad first_free pointer in anode %08x", ano);
				goto bail;
			}
		}
	return anode;
	bail:
	brelse(*bhp);
	return NULL;
}
/*
 * Load dnode to memory and do some checks
 */

struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
			     struct quad_buffer_head *qbh)
{
	struct dnode *dnode;
	if (hpfs_sb(s)->sb_chk) {
		if (hpfs_chk_sectors(s, secno, 4, "dnode")) return NULL;
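		/* dnodes are 4 sectors long and must start on a 4-sector
		   boundary */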
		if (secno & 3) {
			hpfs_error(s, "dnode %08x not byte-aligned", secno);
			return NULL;
		}
	}
	if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD)))
		if (hpfs_sb(s)->sb_chk) {
			unsigned p, pp = 0;
			unsigned char *d = (unsigned char *)dnode;
			int b = 0;
			if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) {
				hpfs_error(s, "bad magic on dnode %08x", secno);
				goto bail;
			}
			if (le32_to_cpu(dnode->self) != secno)
				hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self));
			/* Check dirents - bad dirents would cause infinite
			   loops or stray memory accesses */
			if (le32_to_cpu(dnode->first_free) > 2048) {
				hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free));
				goto bail;
			}
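			/* Walk the dirents; d[p] + (d[p+1] << 8) reads each
			   dirent's 16-bit little-endian length in place. */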
			for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) {
				struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p);
				if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) {
					hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
				if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
					if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
					hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
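				/* Record which dirent kinds occur: bit 0 for
				   leaf dirents, bit 1 for ones with a down
				   pointer; both set means a mixed, i.e.
				   unbalanced, dnode. */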
				ok:
				if (hpfs_sb(s)->sb_chk >= 2) b |= 1 << de->down;
				if (de->down) if (de_down_pointer(de) < 0x10) {
					hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp);
					goto bail;
				}
				pp = p;
			}
			if (p != le32_to_cpu(dnode->first_free)) {
				hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno);
				goto bail;
			}
			if (d[pp + 30] != 1 || d[pp + 31] != 255) {
				hpfs_error(s, "dnode %08x does not end with \\377 entry", secno);
				goto bail;
			}
			if (b == 3)
				pr_err("unbalanced dnode tree, dnode %08x; see hpfs.txt for more info\n",
					secno);
		}
	return dnode;
	bail:
	hpfs_brelse4(qbh);
	return NULL;
}
dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	dnode_secno dno;

	fnode = hpfs_map_fnode(s, ino, &bh);
	if (!fnode)
		return 0;
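	/* For a directory fnode the first btree extent slot holds the
	   number of the root dnode. */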
	dno = le32_to_cpu(fnode->u.external[0].disk_secno);
	brelse(bh);
	return dno;
}