/*
 *  linux/fs/hfs/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include "btree.h"
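
/*
 * Copy @len bytes at offset @off within the node into @buf. The byte
 * I/O helpers below only map page[0], assuming the accessed range lies
 * within the node's first page.
 */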
void hfs_bnode_read(struct hfs_bnode *node, void *buf,
		int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(buf, kmap(page) + off, len);
	kunmap(page);
}
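
/* Read a big-endian u16 from the node and return it in CPU byte order. */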
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}
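
/* Read a single byte from the node. */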
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}
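
/*
 * Read a record key at @off. Leaf keys and variable-length index keys
 * carry their own length byte; otherwise the tree's fixed max_key_len
 * is used.
 */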
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	hfs_bnode_read(node, key, off, key_len);
}
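
/* Copy @len bytes from @buf into the node and mark the page dirty. */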
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(kmap(page) + off, buf, len);
	kunmap(page);
	set_page_dirty(page);
}
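
/* Write a u16 to the node in big-endian order. */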
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}
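
/* Write a single byte to the node. */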
void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	// optimize later...
	hfs_bnode_write(node, &data, off, 1);
}
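
/* Zero @len bytes of the node starting at @off. */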
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memset(kmap(page) + off, 0, len);
	kunmap(page);
	set_page_dirty(page);
}
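
/* Copy @len bytes between two nodes and dirty the destination page. */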
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page *src_page, *dst_page;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
	kunmap(src_page);
	kunmap(dst_page);
	set_page_dirty(dst_page);
}
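
/* Move bytes within a node; memmove() allows the ranges to overlap. */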
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap(page);
	set_page_dirty(page);
}
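
/* Dump the node descriptor and record offset table to the debug log. */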
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg_cont(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			hfs_dbg_cont(BNODE_MOD, " (%d,%d",
				     tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}
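
/*
 * Unlink a node from its sibling chain: fix up the on-disk prev/next
 * pointers of the neighbours and, for leaves, the tree's leaf_head and
 * leaf_tail, then mark the node deleted.
 */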
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}
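
/* Fold a node number into an index in the node_hash table. */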
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}
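
/*
 * Look up a cached node by node number; returns NULL if it is not
 * hashed. Callers in this file hold tree->hash_lock.
 */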
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}
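
/*
 * Allocate a node, insert it into the hash table (or wait for and reuse
 * an existing entry), and read in its backing pages. On page read
 * failure the node is returned with HFS_BNODE_ERROR set.
 */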
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_mapping_page(mapping, block++, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			put_page(page);
			goto fail;
		}
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}
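
/* Remove a node from the hash table; tree->hash_lock must be held. */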
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}
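
/* Drop the page references held by a node and free it. */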
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}
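
/* Create a fresh, zero-filled node; it must not already be cached. */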
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}
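
/* Take an extra reference on a node. */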
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}