/*
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/swap.h>

#include "btree.h"
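
/*
 * Low-level access helpers.  A node's bytes live in the page cache of
 * the tree's backing inode; 'off' is a byte offset within the node and
 * page_offset is where the node starts inside its first page, so the
 * helpers below address page[0] at page_offset + off.
 */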
void hfs_bnode_read(struct hfs_bnode *node, void *buf,
		int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(buf, kmap(page) + off, len);
	kunmap(page);
}
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;

	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;

	hfs_bnode_read(node, &data, off, 1);
	return data;
}
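
/*
 * Read a record's key.  Leaf keys, and index keys in trees with
 * variable-length index keys, carry their own length in the first key
 * byte (the +1 covers that length byte); otherwise an index key always
 * occupies max_key_len bytes.
 */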
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	hfs_bnode_read(node, key, off, key_len);
}
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(kmap(page) + off, buf, len);
	kunmap(page);
	set_page_dirty(page);
}
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);

	hfs_bnode_write(node, &v, off, 2);
}
void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	hfs_bnode_write(node, &data, off, 1);
}
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memset(kmap(page) + off, 0, len);
	kunmap(page);
	set_page_dirty(page);
}
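
/*
 * Bulk copy/move of record data between node offsets.  Only page[0] is
 * mapped here, which assumes a node fits in a single page (classic HFS
 * btree nodes are typically 512 bytes).
 */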
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page *src_page, *dst_page;

	dprint(DBG_BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
	kunmap(src_page);
	kunmap(dst_page);
	set_page_dirty(dst_page);
}
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	dprint(DBG_BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap(page);
	set_page_dirty(page);
}
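
/*
 * Debug dump of a node.  The 16-bit record offsets are stored at the
 * end of the node and grow backwards: entry i gives the start of
 * record i, and the entry after the last record marks the start of
 * free space.
 */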
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	dprint(DBG_BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	dprint(DBG_BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		dprint(DBG_BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			dprint(DBG_BNODE_MOD, " (%d,%d", tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			dprint(DBG_BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			dprint(DBG_BNODE_MOD, " (%d)", tmp);
		}
	}
	dprint(DBG_BNODE_MOD, "\n");
}
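
/*
 * Detach a node from its sibling chain before deletion: patch the
 * on-disk next/prev links of the neighbours and, for a leaf at either
 * end of the chain, update the tree's leaf_head/leaf_tail.
 */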
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}
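
/*
 * In-memory nodes are cached in tree->node_hash, a small hash table
 * keyed on the node number.
 */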
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}
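
/*
 * Allocate an in-memory bnode for 'cnid' and insert it into the hash,
 * or wait for and return the existing entry if another task is already
 * creating it.  The backing pages are read through the tree inode's
 * address space; a read error flags the node HFS_BNODE_ERROR instead
 * of freeing it.
 */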
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		printk(KERN_ERR "hfs: request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kmalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	memset(node, 0, size);
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	dprint(DBG_BNODE_REFS, "new_node(%d:%d): 1\n",
	       node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_CACHE_SHIFT;
	node->page_offset = off & ~PAGE_CACHE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_cache_page(mapping, block++, (filler_t *)mapping->a_ops->readpage, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			page_cache_release(page);
			goto fail;
		}
		page_cache_release(page);
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}
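
/*
 * Drop a node from its tree's hash table.  The caller is expected to
 * hold tree->hash_lock (hfs_bnode_put below takes it via
 * atomic_dec_and_lock before calling this).
 */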
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	dprint(DBG_BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}
/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}
void hfs_bnode_free(struct hfs_bnode *node)
{
	//int i;

	//for (i = 0; i < node->tree->pages_per_bnode; i++)
	//	if (node->page[i])
	//		page_cache_release(node->page[i]);
	kfree(node);
}
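
/*
 * Create a brand-new, zero-filled node, e.g. when the tree grows.  The
 * node must not already be cached; its pages are cleared and marked
 * dirty before the node is published as no longer new.
 */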
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	BUG_ON(node);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_CACHE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}
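
/* Take an extra reference on a cached node; paired with hfs_bnode_put(). */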
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
	}
}
/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		dprint(DBG_BNODE_REFS, "put_node(%d:%d): %d\n",
		       node->tree->cnid, node->this, atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}