// SPDX-License-Identifier: GPL-2.0
/*
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */
12 #include <linux/pagemap.h>
13 #include <linux/slab.h>
14 #include <linux/log2.h>
18 /* Get a reference to a B*Tree and do some initial checks */
19 struct hfs_btree
*hfs_btree_open(struct super_block
*sb
, u32 id
, btree_keycmp keycmp
)
21 struct hfs_btree
*tree
;
22 struct hfs_btree_header_rec
*head
;
23 struct address_space
*mapping
;
27 tree
= kzalloc(sizeof(*tree
), GFP_KERNEL
);
31 mutex_init(&tree
->tree_lock
);
32 spin_lock_init(&tree
->hash_lock
);
33 /* Set the correct compare function */
36 tree
->keycmp
= keycmp
;
38 tree
->inode
= iget_locked(sb
, id
);
41 BUG_ON(!(tree
->inode
->i_state
& I_NEW
));
43 struct hfs_mdb
*mdb
= HFS_SB(sb
)->mdb
;
44 HFS_I(tree
->inode
)->flags
= 0;
45 mutex_init(&HFS_I(tree
->inode
)->extents_lock
);
48 hfs_inode_read_fork(tree
->inode
, mdb
->drXTExtRec
, mdb
->drXTFlSize
,
49 mdb
->drXTFlSize
, be32_to_cpu(mdb
->drXTClpSiz
));
50 if (HFS_I(tree
->inode
)->alloc_blocks
>
51 HFS_I(tree
->inode
)->first_blocks
) {
52 pr_err("invalid btree extent records\n");
53 unlock_new_inode(tree
->inode
);
57 tree
->inode
->i_mapping
->a_ops
= &hfs_btree_aops
;
60 hfs_inode_read_fork(tree
->inode
, mdb
->drCTExtRec
, mdb
->drCTFlSize
,
61 mdb
->drCTFlSize
, be32_to_cpu(mdb
->drCTClpSiz
));
63 if (!HFS_I(tree
->inode
)->first_blocks
) {
64 pr_err("invalid btree extent records (0 size)\n");
65 unlock_new_inode(tree
->inode
);
69 tree
->inode
->i_mapping
->a_ops
= &hfs_btree_aops
;
75 unlock_new_inode(tree
->inode
);
77 mapping
= tree
->inode
->i_mapping
;
78 page
= read_mapping_page(mapping
, 0, NULL
);
83 head
= (struct hfs_btree_header_rec
*)(kmap_local_page(page
) +
84 sizeof(struct hfs_bnode_desc
));
85 tree
->root
= be32_to_cpu(head
->root
);
86 tree
->leaf_count
= be32_to_cpu(head
->leaf_count
);
87 tree
->leaf_head
= be32_to_cpu(head
->leaf_head
);
88 tree
->leaf_tail
= be32_to_cpu(head
->leaf_tail
);
89 tree
->node_count
= be32_to_cpu(head
->node_count
);
90 tree
->free_nodes
= be32_to_cpu(head
->free_nodes
);
91 tree
->attributes
= be32_to_cpu(head
->attributes
);
92 tree
->node_size
= be16_to_cpu(head
->node_size
);
93 tree
->max_key_len
= be16_to_cpu(head
->max_key_len
);
94 tree
->depth
= be16_to_cpu(head
->depth
);
96 size
= tree
->node_size
;
97 if (!is_power_of_2(size
))
99 if (!tree
->node_count
)
103 if (tree
->max_key_len
!= HFS_MAX_EXT_KEYLEN
) {
104 pr_err("invalid extent max_key_len %d\n",
110 if (tree
->max_key_len
!= HFS_MAX_CAT_KEYLEN
) {
111 pr_err("invalid catalog max_key_len %d\n",
120 tree
->node_size_shift
= ffs(size
) - 1;
121 tree
->pages_per_bnode
= (tree
->node_size
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
131 tree
->inode
->i_mapping
->a_ops
= &hfs_aops
;
138 /* Release resources used by a btree */
/*
 * Tear down a btree's node cache: every bnode still sitting in the hash
 * table is unlinked and freed.  Nodes that still hold a nonzero
 * reference count are reported -- no user should remain by the time the
 * tree is closed -- but are freed regardless.
 *
 * NOTE(review): the remainder of the function (original lines 156+,
 * presumably releasing the tree inode and descriptor) is elided from
 * this excerpt -- confirm against the full file.
 */
139 void hfs_btree_close(struct hfs_btree
*tree
)
141 struct hfs_bnode
*node
;
/* Walk every hash bucket, draining each chain front-to-back. */
147 for (i
= 0; i
< NODE_HASH_SIZE
; i
++) {
148 while ((node
= tree
->node_hash
[i
])) {
/* Unlink from the bucket before freeing. */
149 tree
->node_hash
[i
] = node
->next_hash
;
150 if (atomic_read(&node
->refcnt
))
151 pr_err("node %d:%d still has %d user(s)!\n",
152 node
->tree
->cnid
, node
->this,
153 atomic_read(&node
->refcnt
));
154 hfs_bnode_free(node
);
155 tree
->node_hash_cnt
--;
/*
 * Flush the in-core btree header back to disk: locate node 0 (the
 * header node), overwrite the mutable header-record fields with their
 * current in-core values (converted back to big-endian), and mark the
 * page dirty so writeback picks it up.
 *
 * Note that node_size and max_key_len are not written back here; only
 * the fields updated at runtime (root, leaf chain, counts, attributes,
 * depth) are.  (The kunmap/bnode-put tail of the function is elided
 * from this excerpt.)
 */
162 void hfs_btree_write(struct hfs_btree
*tree
)
164 struct hfs_btree_header_rec
*head
;
165 struct hfs_bnode
*node
;
/* Node 0 is always the header node. */
168 node
= hfs_bnode_find(tree
, 0);
172 /* Load the header */
173 page
= node
->page
[0];
174 head
= (struct hfs_btree_header_rec
*)(kmap_local_page(page
) +
175 sizeof(struct hfs_bnode_desc
));
/* Mirror of the be*_to_cpu decoding done at open time. */
177 head
->root
= cpu_to_be32(tree
->root
);
178 head
->leaf_count
= cpu_to_be32(tree
->leaf_count
);
179 head
->leaf_head
= cpu_to_be32(tree
->leaf_head
);
180 head
->leaf_tail
= cpu_to_be32(tree
->leaf_tail
);
181 head
->node_count
= cpu_to_be32(tree
->node_count
);
182 head
->free_nodes
= cpu_to_be32(tree
->free_nodes
);
183 head
->attributes
= cpu_to_be32(tree
->attributes
);
184 head
->depth
= cpu_to_be16(tree
->depth
);
187 set_page_dirty(page
);
/*
 * Create a fresh allocation-map (bitmap) node at node index @idx and
 * chain it after @prev by storing @idx into @prev's node-descriptor
 * "next" field.  The new node is zeroed, typed HFS_NODE_MAP, given a
 * single record, and has its own map bit (the first bit, 0x8000) set
 * since the map node occupies the first slot it tracks.
 *
 * NOTE(review): error handling between the visible statements (e.g.
 * after hfs_bnode_create and the free_nodes check) is elided from this
 * excerpt.
 */
191 static struct hfs_bnode
*hfs_bmap_new_bmap(struct hfs_bnode
*prev
, u32 idx
)
193 struct hfs_btree
*tree
= prev
->tree
;
194 struct hfs_bnode
*node
;
195 struct hfs_bnode_desc desc
;
198 node
= hfs_bnode_create(tree
, idx
);
202 if (!tree
->free_nodes
)
/* Link the new map node into the on-disk chain after @prev. */
206 cnid
= cpu_to_be32(idx
);
207 hfs_bnode_write(prev
, &cnid
, offsetof(struct hfs_bnode_desc
, next
), 4);
209 node
->type
= HFS_NODE_MAP
;
/* Start from a clean slate: zero the whole node on disk. */
211 hfs_bnode_clear(node
, 0, tree
->node_size
);
214 desc
.type
= HFS_NODE_MAP
;
216 desc
.num_recs
= cpu_to_be16(1);
218 hfs_bnode_write(node
, &desc
, 0, sizeof(desc
));
/* Record data begins at offset 14 (right after the descriptor);
 * 0x8000 sets bit 0 of the bitmap, marking this map node itself. */
219 hfs_bnode_write_u16(node
, 14, 0x8000);
/* Record-offset table at the end of the node: record 0 starts at 14
 * and ends at node_size - 6. */
220 hfs_bnode_write_u16(node
, tree
->node_size
- 2, 14);
221 hfs_bnode_write_u16(node
, tree
->node_size
- 4, tree
->node_size
- 6);
226 /* Make sure @tree has enough space for the @rsvd_nodes */
/*
 * Make sure @tree has at least @rsvd_nodes free nodes, growing the
 * underlying tree file as needed.  Each pass extends the file by one
 * allocation unit, then recomputes the inode sizes and the node
 * accounting; every node gained by the extension is counted as free.
 *
 * NOTE(review): the handling of a failed hfs_extend_file() (original
 * lines 235-236) and the function's return are elided from this
 * excerpt.
 */
227 int hfs_bmap_reserve(struct hfs_btree
*tree
, int rsvd_nodes
)
229 struct inode
*inode
= tree
->inode
;
233 while (tree
->free_nodes
< rsvd_nodes
) {
234 res
= hfs_extend_file(inode
);
/* Refresh the inode's byte/block sizes from the new allocation. */
237 HFS_I(inode
)->phys_size
= inode
->i_size
=
238 (loff_t
)HFS_I(inode
)->alloc_blocks
*
239 HFS_SB(tree
->sb
)->alloc_blksz
;
240 HFS_I(inode
)->fs_blocks
= inode
->i_size
>>
241 tree
->sb
->s_blocksize_bits
;
242 inode_set_bytes(inode
, inode
->i_size
);
/* Recompute how many whole btree nodes now fit in the file; the
 * newly gained nodes (count - node_count) are all free. */
243 count
= inode
->i_size
>> tree
->node_size_shift
;
244 tree
->free_nodes
+= count
- tree
->node_count
;
245 tree
->node_count
= count
;
/*
 * Allocate a node from the tree's allocation bitmap: reserve one free
 * node, then scan the bitmap bytes (starting in the header node's map
 * record) for the first clear bit.  On success the bit is set, the
 * touched page and the tree inode are dirtied, and the freshly created
 * bnode is returned.  When one map node's bitmap is exhausted the scan
 * follows the chain to the next map node, creating a new one via
 * hfs_bmap_new_bmap() when the chain ends.
 *
 * NOTE(review): this excerpt elides many lines (declarations of nidx/
 * off/idx/i/m, the byte-scan loop around the bit loop, kunmap calls,
 * and the outer iteration), so the exact loop structure must be
 * confirmed against the full file.
 */
250 struct hfs_bnode
*hfs_bmap_alloc(struct hfs_btree
*tree
)
252 struct hfs_bnode
*node
, *next_node
;
/* Grow the tree file first if no free node is available. */
261 res
= hfs_bmap_reserve(tree
, 1);
266 node
= hfs_bnode_find(tree
, nidx
);
/* Record 2 of the header node holds the first map record. */
269 len
= hfs_brec_lenoff(node
, 2, &off16
);
272 off
+= node
->page_offset
;
273 pagep
= node
->page
+ (off
>> PAGE_SHIFT
);
274 data
= kmap_local_page(*pagep
);
/* Scan the current byte MSB-first for a clear bit. */
282 for (m
= 0x80, i
= 0; i
< 8; m
>>= 1, i
++) {
/* Found one: claim the bit, push the change out, and hand back
 * a newly created bnode for the claimed index. */
286 set_page_dirty(*pagep
);
289 mark_inode_dirty(tree
->inode
);
291 return hfs_bnode_create(tree
, idx
);
/* Crossed a page boundary within the map record: remap. */
295 if (++off
>= PAGE_SIZE
) {
297 data
= kmap_local_page(*++pagep
);
/* Bitmap exhausted in this map node: extend the map chain... */
306 printk(KERN_DEBUG
"create new bmap node...\n");
307 next_node
= hfs_bmap_new_bmap(node
, idx
);
/* ...or follow it to the already-existing next map node. */
309 next_node
= hfs_bnode_find(tree
, nidx
);
311 if (IS_ERR(next_node
))
/* In a pure map node the bitmap is record 0 (vs. record 2 in the
 * header node). */
315 len
= hfs_brec_lenoff(node
, 0, &off16
);
317 off
+= node
->page_offset
;
318 pagep
= node
->page
+ (off
>> PAGE_SHIFT
);
319 data
= kmap_local_page(*pagep
);
/*
 * Return @node's slot to the allocation bitmap: walk the map-node chain
 * from the header node until the map record covering bit @nidx
 * (node->this) is found, then clear that bit and dirty the page and
 * tree inode.  Inconsistencies -- no covering map node, a chain entry
 * that is not HFS_NODE_MAP, or a bit that is already clear -- are
 * reported with pr_crit.
 *
 * NOTE(review): this excerpt elides the declarations (tree/nidx/off/i/
 * byte), the chain-advance bookkeeping inside the while loop, kunmap
 * calls, and the early-return paths after the pr_crit reports; confirm
 * against the full file.
 */
324 void hfs_bmap_free(struct hfs_bnode
*node
)
326 struct hfs_btree
*tree
;
332 hfs_dbg(BNODE_MOD
, "btree_free_node: %u\n", node
->this);
/* Start at the header node; its map record is record 2. */
335 node
= hfs_bnode_find(tree
, 0);
338 len
= hfs_brec_lenoff(node
, 2, &off
);
/* Each map record covers len * 8 node indices; skip whole records
 * until the one containing @nidx. */
339 while (nidx
>= len
* 8) {
346 pr_crit("unable to free bnode %u. bmap not found!\n",
352 node
= hfs_bnode_find(tree
, i
);
355 if (node
->type
!= HFS_NODE_MAP
) {
356-ish: the chain must consist of map nodes only. */
357 pr_crit("invalid bmap found! (%u,%d)\n",
358 node
->this, node
->type
);
362 len
= hfs_brec_lenoff(node
, 0, &off
);
/* Byte offset of the bit within the mapping. */
364 off
+= node
->page_offset
+ nidx
/ 8;
365 page
= node
->page
[off
>> PAGE_SHIFT
];
366 data
= kmap_local_page(page
);
/* Bit mask within the byte, MSB-first ordering. */
368 m
= 1 << (~nidx
& 7);
/* Double-free detection: the bit must currently be set. */
371 pr_crit("trying to free free bnode %u(%d)\n",
372 node
->this, node
->type
);
377 data
[off
] = byte
& ~m
;
378 set_page_dirty(page
);
382 mark_inode_dirty(tree
->inode
);