1 // SPDX-License-Identifier: GPL-2.0-only
6 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
9 * (C) 1999-2001 Ben Fennema
10 * (C) 1999 Stelias Computing Inc
14 * 02/24/99 blf Created.
20 #include <linux/bitops.h>
21 #include <linux/overflow.h>
/*
 * Map the UDF bitmap primitives onto the kernel's little-endian bitops.
 * Note the clear/set variants are the test-and-* forms: they return the
 * previous value of the bit, which callers use to detect double-free /
 * double-allocate conditions.
 */
26 #define udf_clear_bit __test_and_clear_bit_le
27 #define udf_set_bit __test_and_set_bit_le
28 #define udf_test_bit test_bit_le
29 #define udf_find_next_one_bit find_next_bit_le
/*
 * read_block_bitmap - read one block of the partition's Space Bitmap
 * into the per-bitmap cache slot bitmap->s_block_bitmap[bitmap_nr],
 * then sanity-check it.
 *
 * NOTE(review): this view of the file is elided (interior line numbers
 * skip), so the error paths, loop setup and the final return are not
 * fully visible here.
 */
31 static int read_block_bitmap(struct super_block
*sb
,
32 struct udf_bitmap
*bitmap
, unsigned int block
,
33 unsigned long bitmap_nr
)
35 struct buffer_head
*bh
= NULL
;
37 int max_bits
, off
, count
;
38 struct kernel_lb_addr loc
;
/* Address of the bitmap extent within this partition. */
40 loc
.logicalBlockNum
= bitmap
->s_extPosition
;
41 loc
.partitionReferenceNum
= UDF_SB(sb
)->s_partition
;
/* Map the logical block to a physical block and read it. */
43 bh
= sb_bread(sb
, udf_get_lb_pblock(sb
, &loc
, block
));
44 bitmap
->s_block_bitmap
[bitmap_nr
] = bh
;
48 /* Check consistency of Space Bitmap buffer. */
/* Bits per filesystem block. */
49 max_bits
= sb
->s_blocksize
* 8;
/* First block of the bitmap carries the spaceBitmapDesc header; skip
 * its bits when validating. */
51 off
= sizeof(struct spaceBitmapDesc
) << 3;
52 count
= min(max_bits
- off
, bitmap
->s_nr_groups
);
55 * Rough check if bitmap number is too big to have any bitmap
59 (bitmap
->s_nr_groups
>> (sb
->s_blocksize_bits
+ 3)) + 2)
/* Last bitmap block: only the trailing portion holds valid bits. */
62 count
= bitmap
->s_nr_groups
- bitmap_nr
* max_bits
+
63 (sizeof(struct spaceBitmapDesc
) << 3);
64 count
= min(count
, max_bits
);
/* Any set bit in the padding region marks the bitmap as corrupted;
 * the cache slot is replaced with an ERR_PTR so future loads fail. */
67 for (i
= 0; i
< count
; i
++)
68 if (udf_test_bit(i
+ off
, bh
->b_data
)) {
69 bitmap
->s_block_bitmap
[bitmap_nr
] =
70 ERR_PTR(-EFSCORRUPTED
);
/*
 * load_block_bitmap - return the cache index for block_group's bitmap
 * block, reading it via read_block_bitmap() on a cache miss.
 *
 * NOTE(review): elided view — the success return and parts of the cached
 * path are not visible here.
 */
77 static int load_block_bitmap(struct super_block
*sb
,
78 struct udf_bitmap
*bitmap
,
79 unsigned int block_group
)
82 int nr_groups
= bitmap
->s_nr_groups
;
/* Reject out-of-range group numbers. */
84 if (block_group
>= nr_groups
) {
85 udf_debug("block_group (%u) > nr_groups (%d)\n",
86 block_group
, nr_groups
);
/* Cache hit: slot may hold a valid buffer_head or an ERR_PTR left by a
 * previous failed verification. */
89 if (bitmap
->s_block_bitmap
[block_group
]) {
91 * The bitmap failed verification in the past. No point in
94 if (IS_ERR(bitmap
->s_block_bitmap
[block_group
]))
95 return PTR_ERR(bitmap
->s_block_bitmap
[block_group
]);
/* Cache miss: read the group's bitmap block into the cache. */
99 retval
= read_block_bitmap(sb
, bitmap
, block_group
, block_group
);
/*
 * udf_add_free_space - adjust the partition's free-space counter in the
 * Logical Volume Integrity Descriptor by cnt (may be negative via the
 * le32_add_cpu) and mark the LVID updated.
 *
 * NOTE(review): elided view — the guard for a missing s_lvid_bh is not
 * visible here; presumably it exists before the dereference (confirm
 * against the full file).
 */
106 static void udf_add_free_space(struct super_block
*sb
, u16 partition
, u32 cnt
)
108 struct udf_sb_info
*sbi
= UDF_SB(sb
);
109 struct logicalVolIntegrityDesc
*lvid
;
114 lvid
= (struct logicalVolIntegrityDesc
*)sbi
->s_lvid_bh
->b_data
;
/* freeSpaceTable is on-disk little-endian; update in place. */
115 le32_add_cpu(&lvid
->freeSpaceTable
[partition
], cnt
);
116 udf_updated_lvid(sb
);
/*
 * udf_bitmap_free_blocks - mark [bloc+offset, bloc+offset+count) free in
 * the partition's space bitmap (a set bit means "free" in UDF).
 *
 * NOTE(review): elided view — the do/while over group boundaries, error
 * handling for load_block_bitmap(), and the closing of the loop are not
 * visible here.
 */
119 static void udf_bitmap_free_blocks(struct super_block
*sb
,
120 struct udf_bitmap
*bitmap
,
121 struct kernel_lb_addr
*bloc
,
125 struct udf_sb_info
*sbi
= UDF_SB(sb
);
126 struct buffer_head
*bh
= NULL
;
128 unsigned long block_group
;
132 unsigned long overflow
;
134 mutex_lock(&sbi
->s_alloc_mutex
);
135 /* We make sure this cannot overflow when mounting the filesystem */
/* Bias by the spaceBitmapDesc header bits so "block" indexes into the
 * raw bitmap including the descriptor prefix. */
136 block
= bloc
->logicalBlockNum
+ offset
+
137 (sizeof(struct spaceBitmapDesc
) << 3);
/* Split into (bitmap block, bit-within-block). */
140 block_group
= block
>> (sb
->s_blocksize_bits
+ 3);
141 bit
= block
% (sb
->s_blocksize
<< 3);
144 * Check to see if we are freeing blocks across a group boundary.
146 if (bit
+ count
> (sb
->s_blocksize
<< 3)) {
/* Carry the excess over to the next group on the next iteration. */
147 overflow
= bit
+ count
- (sb
->s_blocksize
<< 3);
150 bitmap_nr
= load_block_bitmap(sb
, bitmap
, block_group
);
154 bh
= bitmap
->s_block_bitmap
[bitmap_nr
];
155 for (i
= 0; i
< count
; i
++) {
/* udf_set_bit returns the old value: nonzero means the block was
 * already free — report the double free. */
156 if (udf_set_bit(bit
+ i
, bh
->b_data
)) {
157 udf_debug("bit %lu already set\n", bit
+ i
);
158 udf_debug("byte=%2x\n",
159 ((__u8
*)bh
->b_data
)[(bit
+ i
) >> 3]);
162 udf_add_free_space(sb
, sbi
->s_partition
, count
);
163 mark_buffer_dirty(bh
);
171 mutex_unlock(&sbi
->s_alloc_mutex
);
/*
 * udf_bitmap_prealloc_blocks - try to allocate up to block_count
 * contiguous blocks starting exactly at first_block from the space
 * bitmap; returns the number actually allocated.
 *
 * NOTE(review): elided view — the outer do-loop header, alloc_count
 * bookkeeping, error paths and return are not visible here.
 */
174 static int udf_bitmap_prealloc_blocks(struct super_block
*sb
,
175 struct udf_bitmap
*bitmap
,
176 uint16_t partition
, uint32_t first_block
,
177 uint32_t block_count
)
179 struct udf_sb_info
*sbi
= UDF_SB(sb
);
181 int bit
, block
, block_group
;
183 struct buffer_head
*bh
;
186 mutex_lock(&sbi
->s_alloc_mutex
);
187 part_len
= sbi
->s_partmaps
[partition
].s_partition_len
;
/* Request entirely beyond the partition: nothing to do. */
188 if (first_block
>= part_len
)
/* Clamp the request to the end of the partition. */
191 if (first_block
+ block_count
> part_len
)
192 block_count
= part_len
- first_block
;
/* Bias by the spaceBitmapDesc header bits, then locate the bitmap
 * block and starting bit. */
195 block
= first_block
+ (sizeof(struct spaceBitmapDesc
) << 3);
196 block_group
= block
>> (sb
->s_blocksize_bits
+ 3);
198 bitmap_nr
= load_block_bitmap(sb
, bitmap
, block_group
);
201 bh
= bitmap
->s_block_bitmap
[bitmap_nr
];
203 bit
= block
% (sb
->s_blocksize
<< 3);
/* Claim consecutive free bits; udf_clear_bit returning 0 means the
 * block was already in use, which ends the run. */
205 while (bit
< (sb
->s_blocksize
<< 3) && block_count
> 0) {
206 if (!udf_clear_bit(bit
, bh
->b_data
))
213 mark_buffer_dirty(bh
);
214 } while (block_count
> 0);
/* Allocated blocks reduce the LVID free-space count. */
217 udf_add_free_space(sb
, partition
, -alloc_count
);
218 mutex_unlock(&sbi
->s_alloc_mutex
);
/*
 * udf_bitmap_new_block - allocate one block from the space bitmap,
 * preferring a free bit at or near "goal"; falls back to scanning all
 * groups (twice) for any free bit. Returns the new logical block
 * number via newblock.
 *
 * NOTE(review): elided view — several labels/gotos, the retry loop
 * body and final returns are not visible here.
 */
222 static udf_pblk_t
udf_bitmap_new_block(struct super_block
*sb
,
223 struct udf_bitmap
*bitmap
, uint16_t partition
,
224 uint32_t goal
, int *err
)
226 struct udf_sb_info
*sbi
= UDF_SB(sb
);
229 int block_group
, group_start
;
230 int end_goal
, nr_groups
, bitmap_nr
, i
;
231 struct buffer_head
*bh
= NULL
;
233 udf_pblk_t newblock
= 0;
236 mutex_lock(&sbi
->s_alloc_mutex
);
/* Invalid goal: fall back to start of partition (elided). */
239 if (goal
>= sbi
->s_partmaps
[partition
].s_partition_len
)
242 nr_groups
= bitmap
->s_nr_groups
;
/* Bias by the spaceBitmapDesc header; group 0 must skip the header
 * bytes when scanning. */
243 block
= goal
+ (sizeof(struct spaceBitmapDesc
) << 3);
244 block_group
= block
>> (sb
->s_blocksize_bits
+ 3);
245 group_start
= block_group
? 0 : sizeof(struct spaceBitmapDesc
);
247 bitmap_nr
= load_block_bitmap(sb
, bitmap
, block_group
);
250 bh
= bitmap
->s_block_bitmap
[bitmap_nr
];
/* memscan for a byte != 0xFF tells us whether this bitmap block has
 * any used entries at all; 0xFF bytes are fully free. */
251 ptr
= memscan((char *)bh
->b_data
+ group_start
, 0xFF,
252 sb
->s_blocksize
- group_start
);
254 if ((ptr
- ((char *)bh
->b_data
)) < sb
->s_blocksize
) {
255 bit
= block
% (sb
->s_blocksize
<< 3);
/* Exact goal bit free? Take it. */
256 if (udf_test_bit(bit
, bh
->b_data
))
/* Otherwise look up to the next 64-bit boundary past the goal. */
259 end_goal
= (bit
+ 63) & ~63;
260 bit
= udf_find_next_one_bit(bh
->b_data
, end_goal
, bit
);
/* Then scan bytewise for a fully-free byte after the goal... */
264 ptr
= memscan((char *)bh
->b_data
+ (bit
>> 3), 0xFF,
265 sb
->s_blocksize
- ((bit
+ 7) >> 3));
266 newbit
= (ptr
- ((char *)bh
->b_data
)) << 3;
267 if (newbit
< sb
->s_blocksize
<< 3) {
/* ...and finally bitwise for any free bit in the block. */
272 newbit
= udf_find_next_one_bit(bh
->b_data
,
273 sb
->s_blocksize
<< 3, bit
);
274 if (newbit
< sb
->s_blocksize
<< 3) {
/* No luck near the goal: walk all groups, allowing two passes. */
280 for (i
= 0; i
< (nr_groups
* 2); i
++) {
282 if (block_group
>= nr_groups
)
284 group_start
= block_group
? 0 : sizeof(struct spaceBitmapDesc
);
286 bitmap_nr
= load_block_bitmap(sb
, bitmap
, block_group
);
289 bh
= bitmap
->s_block_bitmap
[bitmap_nr
];
291 ptr
= memscan((char *)bh
->b_data
+ group_start
, 0xFF,
292 sb
->s_blocksize
- group_start
);
293 if ((ptr
- ((char *)bh
->b_data
)) < sb
->s_blocksize
) {
294 bit
= (ptr
- ((char *)bh
->b_data
)) << 3;
298 bit
= udf_find_next_one_bit(bh
->b_data
,
299 sb
->s_blocksize
<< 3,
301 if (bit
< sb
->s_blocksize
<< 3)
/* Two full passes with nothing found: give up. */
305 if (i
>= (nr_groups
* 2)) {
306 mutex_unlock(&sbi
->s_alloc_mutex
);
309 if (bit
< sb
->s_blocksize
<< 3)
312 bit
= udf_find_next_one_bit(bh
->b_data
, sb
->s_blocksize
<< 3,
314 if (bit
>= sb
->s_blocksize
<< 3) {
315 mutex_unlock(&sbi
->s_alloc_mutex
);
/* Slide back over up to 7 preceding free bits so small files pack
 * toward the start of a free byte (elided decrement in loop body). */
321 while (i
< 7 && bit
> (group_start
<< 3) &&
322 udf_test_bit(bit
- 1, bh
->b_data
)) {
/* Convert (group, bit) back to a partition-relative block number. */
328 newblock
= bit
+ (block_group
<< (sb
->s_blocksize_bits
+ 3)) -
329 (sizeof(struct spaceBitmapDesc
) << 3);
331 if (newblock
>= sbi
->s_partmaps
[partition
].s_partition_len
) {
333 * Ran off the end of the bitmap, and bits following are
334 * non-compliant (not all zero)
336 udf_err(sb
, "bitmap for partition %d corrupted (block %u marked"
337 " as free, partition length is %u)\n", partition
,
338 newblock
, sbi
->s_partmaps
[partition
].s_partition_len
);
/* Claim the bit; old value 0 means someone beat us to it. */
342 if (!udf_clear_bit(bit
, bh
->b_data
)) {
343 udf_debug("bit already cleared for block %d\n", bit
);
347 mark_buffer_dirty(bh
);
349 udf_add_free_space(sb
, partition
, -1);
350 mutex_unlock(&sbi
->s_alloc_mutex
);
356 mutex_unlock(&sbi
->s_alloc_mutex
);
/*
 * udf_table_free_blocks - return [start, end] to the unallocated-space
 * table (an inode holding extents of free blocks): first try to merge
 * the freed range into an adjacent existing extent, otherwise append a
 * new extent, stealing a block from it for table metadata if needed.
 *
 * NOTE(review): elided view — loop headers, brelse calls, and several
 * branches are not visible here.
 */
360 static void udf_table_free_blocks(struct super_block
*sb
,
362 struct kernel_lb_addr
*bloc
,
366 struct udf_sb_info
*sbi
= UDF_SB(sb
);
369 struct kernel_lb_addr eloc
;
370 struct extent_position oepos
, epos
;
372 struct udf_inode_info
*iinfo
;
374 mutex_lock(&sbi
->s_alloc_mutex
);
375 iinfo
= UDF_I(table
);
376 udf_add_free_space(sb
, sbi
->s_partition
, count
);
/* Inclusive range being freed. */
378 start
= bloc
->logicalBlockNum
+ offset
;
379 end
= bloc
->logicalBlockNum
+ offset
+ count
- 1;
/* Start iterating the table just past the unallocSpaceEntry header;
 * oepos trails epos by one extent for in-place rewrites. */
381 epos
.offset
= oepos
.offset
= sizeof(struct unallocSpaceEntry
);
383 epos
.block
= oepos
.block
= iinfo
->i_location
;
384 epos
.bh
= oepos
.bh
= NULL
;
387 (etype
= udf_next_aext(table
, &epos
, &eloc
, &elen
, 1)) != -1) {
/* Case 1: freed range starts right after this extent — extend it. */
388 if (((eloc
.logicalBlockNum
+
389 (elen
>> sb
->s_blocksize_bits
)) == start
)) {
/* 0x3FFFFFFF is the max extent length field; split if the merge
 * would overflow it. */
390 if ((0x3FFFFFFF - elen
) <
391 (count
<< sb
->s_blocksize_bits
)) {
392 uint32_t tmp
= ((0x3FFFFFFF - elen
) >>
393 sb
->s_blocksize_bits
);
/* Grow this extent to the maximum representable length. */
396 elen
= (etype
<< 30) |
397 (0x40000000 - sb
->s_blocksize
);
399 elen
= (etype
<< 30) |
401 (count
<< sb
->s_blocksize_bits
));
405 udf_write_aext(table
, &oepos
, &eloc
, elen
, 1);
/* Case 2: freed range ends right before this extent — prepend. */
406 } else if (eloc
.logicalBlockNum
== (end
+ 1)) {
407 if ((0x3FFFFFFF - elen
) <
408 (count
<< sb
->s_blocksize_bits
)) {
409 uint32_t tmp
= ((0x3FFFFFFF - elen
) >>
410 sb
->s_blocksize_bits
);
413 eloc
.logicalBlockNum
-= tmp
;
414 elen
= (etype
<< 30) |
415 (0x40000000 - sb
->s_blocksize
);
417 eloc
.logicalBlockNum
= start
;
418 elen
= (etype
<< 30) |
420 (count
<< sb
->s_blocksize_bits
));
424 udf_write_aext(table
, &oepos
, &eloc
, elen
, 1);
/* Advance the trailing position to follow epos. */
427 if (epos
.bh
!= oepos
.bh
) {
428 oepos
.block
= epos
.block
;
434 oepos
.offset
= epos
.offset
;
440 * NOTE: we CANNOT use udf_add_aext here, as it can try to
441 * allocate a new block, and since we hold the super block
442 * lock already very bad things would happen :)
444 * We copy the behavior of udf_add_aext, but instead of
445 * trying to allocate a new block close to the existing one,
446 * we just steal a block from the extent we are trying to add.
448 * It would be nice if the blocks were close together, but it
/* No merge possible: append the freed range as a new extent. */
454 eloc
.logicalBlockNum
= start
;
455 elen
= EXT_RECORDED_ALLOCATED
|
456 (count
<< sb
->s_blocksize_bits
);
458 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
459 adsize
= sizeof(struct short_ad
);
460 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
461 adsize
= sizeof(struct long_ad
);
/* Not enough room for the new descriptor plus a continuation:
 * set up an indirect extent block first. */
468 if (epos
.offset
+ (2 * adsize
) > sb
->s_blocksize
) {
469 /* Steal a block from the extent being free'd */
470 udf_setup_indirect_aext(table
, eloc
.logicalBlockNum
,
473 eloc
.logicalBlockNum
++;
474 elen
-= sb
->s_blocksize
;
477 /* It's possible that stealing the block emptied the extent */
479 __udf_add_aext(table
, &epos
, &eloc
, elen
, 1);
486 mutex_unlock(&sbi
->s_alloc_mutex
);
/*
 * udf_table_prealloc_blocks - allocate up to block_count blocks that
 * start exactly at first_block from the unallocated-space table;
 * returns the number allocated (0 if first_block is not the head of a
 * free extent).
 *
 * NOTE(review): elided view — the else-branch default for adsize, the
 * whole-extent-consumed path and the return are not visible here.
 */
490 static int udf_table_prealloc_blocks(struct super_block
*sb
,
491 struct inode
*table
, uint16_t partition
,
492 uint32_t first_block
, uint32_t block_count
)
494 struct udf_sb_info
*sbi
= UDF_SB(sb
);
496 uint32_t elen
, adsize
;
497 struct kernel_lb_addr eloc
;
498 struct extent_position epos
;
500 struct udf_inode_info
*iinfo
;
502 if (first_block
>= sbi
->s_partmaps
[partition
].s_partition_len
)
505 iinfo
= UDF_I(table
);
/* Descriptor size depends on the inode's allocation-descriptor type. */
506 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
507 adsize
= sizeof(struct short_ad
);
508 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
509 adsize
= sizeof(struct long_ad
);
513 mutex_lock(&sbi
->s_alloc_mutex
);
514 epos
.offset
= sizeof(struct unallocSpaceEntry
);
515 epos
.block
= iinfo
->i_location
;
/* Sentinel so the search loop runs until a match or table end. */
517 eloc
.logicalBlockNum
= 0xFFFFFFFF;
/* Walk the free-extent table looking for one starting at first_block. */
519 while (first_block
!= eloc
.logicalBlockNum
&&
520 (etype
= udf_next_aext(table
, &epos
, &eloc
, &elen
, 1)) != -1) {
521 udf_debug("eloc=%u, elen=%u, first_block=%u\n",
522 eloc
.logicalBlockNum
, elen
, first_block
);
523 ; /* empty loop body */
526 if (first_block
== eloc
.logicalBlockNum
) {
/* Step back onto the matching descriptor so we can rewrite it. */
527 epos
.offset
-= adsize
;
529 alloc_count
= (elen
>> sb
->s_blocksize_bits
);
530 if (alloc_count
> block_count
) {
/* Partial take: shrink the extent from its front. */
531 alloc_count
= block_count
;
532 eloc
.logicalBlockNum
+= alloc_count
;
533 elen
-= (alloc_count
<< sb
->s_blocksize_bits
);
534 udf_write_aext(table
, &epos
, &eloc
,
535 (etype
<< 30) | elen
, 1);
/* Whole extent consumed: drop its descriptor. */
537 udf_delete_aext(table
, epos
);
545 udf_add_free_space(sb
, partition
, -alloc_count
);
546 mutex_unlock(&sbi
->s_alloc_mutex
);
/*
 * udf_table_new_block - allocate one block from the unallocated-space
 * table, choosing the free extent whose start is closest to "goal"
 * (smallest "spread"), then taking the first block of that extent.
 *
 * NOTE(review): elided view — loop headers, goal clamping, and the
 * final return are not visible here.
 */
550 static udf_pblk_t
udf_table_new_block(struct super_block
*sb
,
551 struct inode
*table
, uint16_t partition
,
552 uint32_t goal
, int *err
)
554 struct udf_sb_info
*sbi
= UDF_SB(sb
);
/* 0xFFFFFFFF == "no candidate yet". */
555 uint32_t spread
= 0xFFFFFFFF, nspread
= 0xFFFFFFFF;
556 udf_pblk_t newblock
= 0;
558 uint32_t elen
, goal_elen
= 0;
559 struct kernel_lb_addr eloc
, goal_eloc
;
560 struct extent_position epos
, goal_epos
;
562 struct udf_inode_info
*iinfo
= UDF_I(table
);
566 if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_SHORT
)
567 adsize
= sizeof(struct short_ad
);
568 else if (iinfo
->i_alloc_type
== ICBTAG_FLAG_AD_LONG
)
569 adsize
= sizeof(struct long_ad
);
573 mutex_lock(&sbi
->s_alloc_mutex
);
574 if (goal
>= sbi
->s_partmaps
[partition
].s_partition_len
)
577 /* We search for the closest matching block to goal. If we find
578 a exact hit, we stop. Otherwise we keep going till we run out
579 of extents. We store the buffer_head, bloc, and extoffset
580 of the current closest match and use that when we are done.
582 epos
.offset
= sizeof(struct unallocSpaceEntry
);
583 epos
.block
= iinfo
->i_location
;
584 epos
.bh
= goal_epos
.bh
= NULL
;
587 (etype
= udf_next_aext(table
, &epos
, &eloc
, &elen
, 1)) != -1) {
588 if (goal
>= eloc
.logicalBlockNum
) {
/* goal falls inside this free extent: exact hit (spread 0). */
589 if (goal
< eloc
.logicalBlockNum
+
590 (elen
>> sb
->s_blocksize_bits
))
/* goal is past this extent: distance from its end. */
593 nspread
= goal
- eloc
.logicalBlockNum
-
594 (elen
>> sb
->s_blocksize_bits
);
/* goal is before this extent: distance to its start. */
596 nspread
= eloc
.logicalBlockNum
- goal
;
/* Remember the closest extent so far, taking a reference on its
 * buffer so it survives further iteration. */
599 if (nspread
< spread
) {
601 if (goal_epos
.bh
!= epos
.bh
) {
602 brelse(goal_epos
.bh
);
603 goal_epos
.bh
= epos
.bh
;
604 get_bh(goal_epos
.bh
);
606 goal_epos
.block
= epos
.block
;
/* Step back onto the candidate's descriptor. */
607 goal_epos
.offset
= epos
.offset
- adsize
;
609 goal_elen
= (etype
<< 30) | elen
;
/* No free extent found at all: table is empty. */
615 if (spread
== 0xFFFFFFFF) {
616 brelse(goal_epos
.bh
);
617 mutex_unlock(&sbi
->s_alloc_mutex
);
621 /* Only allocate blocks from the beginning of the extent.
622 That way, we only delete (empty) extents, never have to insert an
623 extent because of splitting */
624 /* This works, but very poorly.... */
626 newblock
= goal_eloc
.logicalBlockNum
;
627 goal_eloc
.logicalBlockNum
++;
628 goal_elen
-= sb
->s_blocksize
;
/* Rewrite the shrunken extent, or delete it if now empty (elided
 * branch selects between these). */
631 udf_write_aext(table
, &goal_epos
, &goal_eloc
, goal_elen
, 1);
633 udf_delete_aext(table
, goal_epos
);
634 brelse(goal_epos
.bh
);
636 udf_add_free_space(sb
, partition
, -1);
638 mutex_unlock(&sbi
->s_alloc_mutex
);
/*
 * udf_free_blocks - public entry point for freeing blocks: validates
 * the request, dispatches to the bitmap- or table-based freer for this
 * partition, and charges the inode's byte accounting.
 *
 * NOTE(review): elided view — the early return after the invalid-request
 * debug message and the inode NULL check are not visible here.
 */
643 void udf_free_blocks(struct super_block
*sb
, struct inode
*inode
,
644 struct kernel_lb_addr
*bloc
, uint32_t offset
,
647 uint16_t partition
= bloc
->partitionReferenceNum
;
648 struct udf_part_map
*map
= &UDF_SB(sb
)->s_partmaps
[partition
];
/* Reject requests whose arithmetic overflows or that extend past the
 * end of the partition. */
651 if (check_add_overflow(bloc
->logicalBlockNum
, offset
, &blk
) ||
652 check_add_overflow(blk
, count
, &blk
) ||
653 bloc
->logicalBlockNum
+ count
> map
->s_partition_len
) {
654 udf_debug("Invalid request to free blocks: (%d, %u), off %u, "
655 "len %u, partition len %u\n",
656 partition
, bloc
->logicalBlockNum
, offset
, count
,
657 map
->s_partition_len
);
/* Dispatch on how this partition tracks free space. */
661 if (map
->s_partition_flags
& UDF_PART_FLAG_UNALLOC_BITMAP
) {
662 udf_bitmap_free_blocks(sb
, map
->s_uspace
.s_bitmap
,
663 bloc
, offset
, count
);
664 } else if (map
->s_partition_flags
& UDF_PART_FLAG_UNALLOC_TABLE
) {
665 udf_table_free_blocks(sb
, map
->s_uspace
.s_table
,
666 bloc
, offset
, count
);
/* Freed blocks no longer count against the owning inode. */
670 inode_sub_bytes(inode
,
671 ((sector_t
)count
) << sb
->s_blocksize_bits
);
/*
 * udf_prealloc_blocks - public entry point for preallocation:
 * dispatches to the bitmap- or table-based preallocator and, on
 * success, charges the allocated bytes to the inode (when given one).
 *
 * NOTE(review): elided view — the inode parameter in the signature,
 * the trailing block_count argument lines and the return are not
 * visible here.
 */
675 inline int udf_prealloc_blocks(struct super_block
*sb
,
677 uint16_t partition
, uint32_t first_block
,
678 uint32_t block_count
)
680 struct udf_part_map
*map
= &UDF_SB(sb
)->s_partmaps
[partition
];
/* Dispatch on the partition's free-space tracking scheme. */
683 if (map
->s_partition_flags
& UDF_PART_FLAG_UNALLOC_BITMAP
)
684 allocated
= udf_bitmap_prealloc_blocks(sb
,
685 map
->s_uspace
.s_bitmap
,
686 partition
, first_block
,
688 else if (map
->s_partition_flags
& UDF_PART_FLAG_UNALLOC_TABLE
)
689 allocated
= udf_table_prealloc_blocks(sb
,
690 map
->s_uspace
.s_table
,
691 partition
, first_block
,
/* Account the newly-owned bytes to the inode, if any were allocated. */
696 if (inode
&& allocated
> 0)
697 inode_add_bytes(inode
, allocated
<< sb
->s_blocksize_bits
);
701 inline udf_pblk_t
udf_new_block(struct super_block
*sb
,
703 uint16_t partition
, uint32_t goal
, int *err
)
705 struct udf_part_map
*map
= &UDF_SB(sb
)->s_partmaps
[partition
];
708 if (map
->s_partition_flags
& UDF_PART_FLAG_UNALLOC_BITMAP
)
709 block
= udf_bitmap_new_block(sb
,
710 map
->s_uspace
.s_bitmap
,
711 partition
, goal
, err
);
712 else if (map
->s_partition_flags
& UDF_PART_FLAG_UNALLOC_TABLE
)
713 block
= udf_table_new_block(sb
,
714 map
->s_uspace
.s_table
,
715 partition
, goal
, err
);
721 inode_add_bytes(inode
, sb
->s_blocksize
);