// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}
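
/*
 * Build a search key for the extents overflow tree.  The on-disk
 * key_len field counts only the bytes that follow it, which is why
 * HFSPLUS_EXT_KEYLEN is stored minus the 2-byte length field itself.
 */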
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}
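
/*
 * An hfsplus_extent_rec holds up to eight (start_block, block_count)
 * runs.  Walk them in order until @off falls inside one, then map it
 * to an allocation block number.  Returns 0 if @off lies past the end
 * of the record.
 */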
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}
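
/*
 * Return the allocation block just past the last block mapped by this
 * extent record, found by scanning backwards for the last non-empty
 * descriptor.
 */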
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}
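
/*
 * Write the cached extent record back into the extents overflow tree,
 * inserting a new record if HFSPLUS_EXT_NEW is set and updating the
 * existing one in place otherwise.  Caller must hold extents_lock.
 */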
static int __hfsplus_ext_write_extent(struct inode *inode,
				      struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return res;
		/* Fail early and avoid ENOSPC during the btree operation */
		res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
		if (res)
			return res;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);

	return 0;
}
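
/* Flush the cached extent record, if dirty.  Caller holds extents_lock. */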
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res = 0;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}
int hfsplus_ext_write_extent(struct inode *inode)
{
	int res;

	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	res = hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);

	return res;
}
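
/*
 * Look up the extent record covering (@cnid, @block, @type).
 * fd->key->ext.cnid is cleared first so that, presumably, a lookup
 * that finds no record cannot leave a stale key behind that would
 * pass the cnid/fork_type checks below.
 */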
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		sizeof(hfsplus_extent_rec));
	return 0;
}
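
/*
 * Load the extent record covering @block into the inode's cache,
 * writing back the currently cached record first if it is dirty.
 * Caller must hold extents_lock.
 */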
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
					     struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
		res = __hfsplus_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}
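
/*
 * Make sure the cached extent record covers @block; the extents tree
 * is consulted only on a cache miss.
 */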
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfsplus_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}
/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;

	/* Convert inode block to disk allocation block */
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (!create)
			return 0;
		if (iblock > hip->fs_blocks)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode, false);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree.  In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (long long)iblock, dblock);

	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		 sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}
static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	hfs_dbg(EXTENT, "   ");
	for (i = 0; i < 8; i++)
		hfs_dbg_cont(EXTENT, " %u:%u",
			     be32_to_cpu(extent[i].start_block),
			     be32_to_cpu(extent[i].block_count));
	hfs_dbg_cont(EXTENT, "\n");
}
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}
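
/*
 * Free the last @block_nr allocation blocks mapped by an extent
 * record, walking the descriptors backwards from @offset.  Freeing
 * continues past individual failures and only the last error is
 * returned.
 */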
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;
	int err = 0;

	/* Mapping the allocation file may lock the extent tree */
	WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			err = hfsplus_block_free(sb, start, count);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			err = hfsplus_block_free(sb, start + count, block_nr);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i) {
			/*
			 * Try to free all extents and
			 * return only last error
			 */
			return err;
		}
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}
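
/*
 * Free every block of a fork: first the eight extents stored in the
 * catalog record, then each overflow record, which is removed from
 * the extents tree once its blocks have been freed.
 */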
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfs_brec_remove(&fd);

		mutex_unlock(&fd.tree->tree_lock);
		hfsplus_free_extents(sb, ext_entry, total_blocks - start,
				     total_blocks - start);
		total_blocks = start;
		mutex_lock(&fd.tree->tree_lock);
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}
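
/*
 * Grow a file by (up to) one clump.  The allocation goal is the block
 * just past the file's current last extent; if the volume has nothing
 * free there, the search restarts from block 0.  The new run is
 * appended to the in-inode or cached extent record when possible,
 * otherwise a fresh overflow record is started at insert_extent.
 */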
int hfsplus_file_extend(struct inode *inode, bool zeroout)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		pr_err("extend alloc file! (%llu,%u,%u)\n",
		       sbi->alloc_file->i_size * 8,
		       sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	if (zeroout) {
		res = sb_issue_zeroout(sb, start, len, GFP_NOFS);
		if (res)
			goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	if (!res) {
		hip->alloc_blocks += len;
		mutex_unlock(&hip->extents_lock);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
		return 0;
	}
	mutex_unlock(&hip->extents_lock);
	return res;

insert_extent:
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfsplus_ext_write_extent_locked(inode);
	if (res)
		goto out;

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}
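
/*
 * Bring the allocation of a file in line with i_size.  Growing is done
 * with a zero-length write at the new size so the intervening blocks
 * get allocated and zeroed through the page cache; shrinking walks the
 * extent records backwards from the end of the fork, freeing blocks
 * and removing overflow records that become entirely unused.
 */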
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
		inode->i_ino, (long long)hip->phys_size, inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t size = inode->i_size;

		res = pagecache_write_begin(NULL, mapping, size, 0, 0,
					    &page, &fsdata);
		if (res)
			return;
		res = pagecache_write_end(NULL, mapping, size,
			0, 0, page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;

	mutex_lock(&hip->extents_lock);

	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out_unlock;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		/* XXX: We lack error handling of hfsplus_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			mutex_unlock(&fd.tree->tree_lock);
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			mutex_lock(&fd.tree->tree_lock);
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		hfs_brec_remove(&fd);

		mutex_unlock(&fd.tree->tree_lock);
		start = hip->cached_start;
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
		mutex_lock(&fd.tree->tree_lock);
	}
	hfs_find_exit(&fd);

	hip->alloc_blocks = blk_cnt;
out_unlock:
	mutex_unlock(&hip->extents_lock);
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}