/*
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/pagemap.h>
#include <linux/slab.h>

#include "btree.h"
/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct page *page;
	unsigned int size;
	tree = kmalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;
	memset(tree, 0, sizeof(*tree));

	init_MUTEX(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	/* Set the correct compare function */
	tree->sb = sb;
	tree->cnid = id;
	tree->keycmp = keycmp;

	tree->inode = iget_locked(sb, id);
	if (!tree->inode)
		goto free_tree;
	if (!(tree->inode->i_state & I_NEW))
		BUG();
	{
		struct hfs_mdb *mdb = HFS_SB(sb)->mdb;

		HFS_I(tree->inode)->flags = 0;
		init_MUTEX(&HFS_I(tree->inode)->extents_lock);
		switch (id) {
		case HFS_EXT_CNID:
			hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
					    mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
			tree->inode->i_mapping->a_ops = &hfs_btree_aops;
			break;
		case HFS_CAT_CNID:
			hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
					    mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
			tree->inode->i_mapping->a_ops = &hfs_btree_aops;
			break;
		default:
			BUG();
		}
	}
	unlock_new_inode(tree->inode);
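
	/* Read node 0 through the page cache; the B-tree header record starts
	 * right after the node descriptor. */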
	mapping = tree->inode->i_mapping;
	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage, NULL);
	if (IS_ERR(page))
		goto free_tree;

	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
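	/* Copy the big-endian on-disk header fields into the in-core tree. */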
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);
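
	/* Sanity-check the header: the node size must be a non-zero power of
	 * two and the tree must contain at least one node. */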
	size = tree->node_size;
	if (!size || size & (size - 1))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;
	tree->node_size_shift = ffs(size) - 1;
	tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	kunmap(page);
	page_cache_release(page);
	return tree;

 fail_page:
	tree->inode->i_mapping->a_ops = &hfs_aops;
	page_cache_release(page);
 free_tree:
	iput(tree->inode);
	kfree(tree);
	return NULL;
}
/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;
	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				printk("HFS: node %d:%d still has %d user(s)!\n",
				       node->tree->cnid, node->this,
				       atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}
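
/* Write the in-core B-tree header fields back into node 0 */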
void hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
}
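
/*
 * Chain a fresh map node onto the end of the allocation bitmap.  'prev' is
 * the last map node (or the header node) in the chain, 'idx' the node number
 * the new map node will occupy.
 */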
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;
	if (!tree->free_nodes)
		panic("FIXME!!!");
	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
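
	/*
	 * Record 0 of a map node is the bitmap itself.  Its first bit is set
	 * (0x8000 written at offset 14, just past the node descriptor) to mark
	 * this new map node as in use.  The two u16s at the end of the node
	 * form the record offset table: record 0 starts at offset 14 and free
	 * space starts at node_size - 6.
	 */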
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
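
/*
 * Allocate a node from the B-tree allocation bitmap.  If no free nodes are
 * left, the tree file is grown first; the first clear bit found in the map
 * records is set and the corresponding node is created and returned.
 */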
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	u16 off, len;
	u8 *data, byte, m;
	int i;
	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		u32 count;
		int res;

		res = hfs_extend_file(inode);
		if (res)
			return ERR_PTR(res);
		inode->i_blocks = HFS_I(inode)->alloc_blocks *
				  HFS_SB(tree->sb)->fs_div;
		HFS_I(inode)->phys_size = inode->i_size =
			(loff_t)inode->i_blocks << tree->sb->s_blocksize_bits;
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}
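
	/* The first chunk of the allocation bitmap lives in record 2 of the
	 * header node (node 0). */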
	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off);

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;
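
	/*
	 * Scan the current map record byte by byte for the first zero bit;
	 * when a record is exhausted, move on to the next map node in the
	 * chain (creating one if the chain ends).
	 */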
	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree, idx);
					}
				}
			}
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			printk("create new bmap node...\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;
		len = hfs_brec_lenoff(node, 0, &off);
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}
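
/*
 * Give a node back to the allocation bitmap: walk the map-node chain to the
 * record covering this node number and clear the matching bit.
 */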
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
293 printk("HFS: unable to free bnode %u. bmap not found!\n", node
->this);
296 node
= hfs_bnode_find(tree
, i
);
		if (node->type != HFS_NODE_MAP) {
			printk("HFS: invalid bmap found! (%u,%d)\n",
			       node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
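
	/* nidx is now the bit index within this map record; locate the byte
	 * that holds the bit for the node being freed and clear it. */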
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_CACHE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_CACHE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
314 printk("HFS: trying to free free bnode %u(%d)\n", node
->this, node
->type
);
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}