// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/*
 * The initial clump size calculation is taken from Apple's
 * http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages
	 * selected results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * ...
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attribute form a geometric
	 * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};
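
/*
 * A quick sanity check of the table against the comment above: the
 * Catalog column grows by 8**(1/5) ~= 1.516 per row (32 * 1.516 ~= 49,
 * 49 * 1.516 ~= 74), and the Attributes column grows by 4**(1/5) ~= 1.320
 * per row (64 * 1.320 ~= 84, 84 * 1.320 ~= 111).
 */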
u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
				  u64 sectors, int file_id)
{
	u32 mod = max(node_size, block_size);
	u32 clump_size;
	int column;
	int i;
	/* Figure out which column of the above table to use for this file. */
	switch (file_id) {
	case HFSPLUS_ATTR_CNID:
		column = 0;
		break;
	case HFSPLUS_CAT_CNID:
		column = 1;
		break;
	default:
		column = 2;
		break;
	}
	/*
	 * The default clump size is 0.8% of the volume size, and it must
	 * also be a multiple of the node and block size.
	 */
	if (sectors < 0x200000) {
		clump_size = sectors << 2;	/*  0.8 %  */
		if (clump_size < (8 * node_size))
			clump_size = 8 * node_size;
	} else {
		/* turn exponent into table index... */
		for (i = 0, sectors = sectors >> 22;
		     sectors && (i < CLUMP_ENTRIES - 1);
		     ++i, sectors = sectors >> 1) {
			/* empty body */
		}

		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
	}

	/*
	 * Round the clump size to a multiple of node and block size.
	 * NOTE: This rounds down.
	 */
	clump_size /= mod;
	clump_size *= mod;

	/*
	 * Rounding down could have rounded down to 0 if the block size was
	 * greater than the clump size.  If so, just use one block or node.
	 */
	if (clump_size == 0)
		clump_size = mod;

	return clump_size;
}
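
/*
 * A worked example of the calculation above, assuming the usual 512-byte
 * sector units: a 4GB volume has 0x800000 sectors, so sectors >> 22 == 2
 * and the loop exits with i == 2, selecting the 4GB table row; for the
 * catalog file (column 1) the raw clump size is then 8MB, before being
 * rounded down to a multiple of max(node_size, block_size).
 */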
/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;
	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}
	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;
	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);
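
	/*
	 * The header record lives in node 0 immediately after the node
	 * descriptor, and all of its multi-byte fields are stored
	 * big-endian on disk, hence the be32/be16 conversions above.
	 */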
	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}
	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;
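
	/*
	 * node_size was verified to be a power of two above, so
	 * ffs(size) - 1 is log2 of the node size: 8192-byte nodes give
	 * node_size_shift == 13 and, with 4KB pages, pages_per_bnode == 2
	 * just below.
	 */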
	tree->pages_per_bnode =
		(tree->node_size + PAGE_SIZE - 1) >>
		PAGE_SHIFT;

	kunmap(page);
	put_page(page);
	return tree;

 fail_page:
	put_page(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}
/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
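		/*
		 * Drain each hash chain; by close time every bnode should
		 * be unreferenced, so a nonzero refcnt is reported below
		 * as a critical error.
		 */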
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_crit("node %d:%d still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}
int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return -EIO;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
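
	/*
	 * Mirror of the loads in hfs_btree_open(): convert the in-core
	 * values back to the big-endian on-disk format.  The page is only
	 * marked dirty below; actual writeback happens later through the
	 * page cache.
	 */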
	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
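
	/*
	 * Lay out the single map record: the record starts right after the
	 * 14-byte node descriptor, so the offset table at the end of the
	 * node points record 0 at offset 14 and the free space at
	 * node_size - 6.  Writing 0x8000 at offset 14 sets the first map
	 * bit, marking this map node itself as allocated.
	 */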
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
	struct inode *inode = tree->inode;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int res;
	if (rsvd_nodes <= 0)
		return 0;

	while (tree->free_nodes < rsvd_nodes) {
		res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
		if (res)
			return res;
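		/*
		 * The file was extended; propagate the new allocation size
		 * into the inode and recompute how many btree nodes now
		 * fit, crediting the newly added nodes to free_nodes below.
		 */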
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes += count - tree->node_count;
		tree->node_count = count;
	}
	return 0;
}
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned int off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;
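
	/*
	 * Walk the allocation bitmap spread across the header and map
	 * nodes: each byte covers eight nodes, most significant bit first,
	 * and the first clear bit names the first free node.
	 */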
	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_MASK;
	m = 1 << (~nidx & 7);
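
	/*
	 * Bits are MSB-first within each bitmap byte, so node nidx lives
	 * in byte nidx / 8 at bit 7 - (nidx % 8); (~nidx & 7) computes
	 * that shift, e.g. nidx == 10 maps to bit 5 of byte 1.
	 */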
498 pr_crit("trying to free free bnode "
500 node
->this, node
->type
);
505 data
[off
] = byte
& ~m
;
506 set_page_dirty(page
);
510 mark_inode_dirty(tree
->inode
);