/*
 * linux/fs/hfsplus/btree.c
 *
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}
	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);
	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}
	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode =
		(tree->node_size + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;

	kunmap(page);
	page_cache_release(page);
	return tree;

 fail_page:
	kunmap(page);
	page_cache_release(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}
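/*
 * Sketch of typical usage (hypothetical call site, modelled on the mount
 * path in super.c, not verbatim kernel code): the special trees are opened
 * once per mount and a NULL return is treated as a corrupt or unsupported
 * tree:
 *
 *	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 *	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
 *	if (!sbi->ext_tree || !sbi->cat_tree)
 *		goto out;	// fail the mount
 */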
/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_crit("node %d:%d still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}
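/*
 * Copy the in-core header fields back into the header record of node 0 and
 * mark the page dirty; the actual writeback happens through the page cache.
 */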
int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return -EIO;

	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}
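/*
 * Append a fresh map node after @prev.  The new node is cleared, typed as
 * HFS_NODE_MAP with a single record starting at byte 14 (right behind the
 * node descriptor) and running up to node_size - 6; the two record offsets
 * are stored at the end of the node.  The first bitmap bit is set (0x8000),
 * which corresponds to the new map node itself being allocated.
 */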
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
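/*
 * Allocate a node from the B-tree allocation bitmap.  The bitmap lives in
 * record 2 of the header node and continues in chained map nodes.  The first
 * clear bit found is set and a bnode for that index is created; when the
 * existing map records are exhausted a new map node is appended, and when
 * the tree file has no free nodes left it is grown first.
 */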
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i;

	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
		u32 count;
		int res;

		res = hfsplus_file_extend(inode);
		if (res)
			return ERR_PTR(res);
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;

	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}
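/*
 * Mark a node free again by clearing its bit in the allocation bitmap,
 * walking from the header node's map record through the chained map nodes
 * until the record covering this node's index is found.
 */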
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		hfs_bnode_put(node);
		if (!i) {
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			return;
		}
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_CACHE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_CACHE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode %u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}