/*
 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1999-2001 Ben Fennema
 * (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 */
#include "udfdecl.h"

#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit	__test_and_clear_bit_le
#define udf_set_bit	__test_and_set_bit_le
#define udf_test_bit	test_bit_le
#define udf_find_next_one_bit	find_next_bit_le
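
/*
 * The on-disk UDF space bitmap stores one bit per block in little-endian
 * bit order, so these wrappers map the UDF bit operations onto the
 * kernel's little-endian bitops.  Note that udf_set_bit/udf_clear_bit are
 * the test-and-modify variants: they return the previous bit value, which
 * the callers below use to detect double frees and already-taken blocks.
 */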
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
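
/*
 * Loaded bitmap blocks are cached in bitmap->s_block_bitmap[], indexed by
 * block group; __load_block_bitmap() returns the slot of the (possibly
 * freshly read) buffer, or a negative errno on read failure.
 */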
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) > nr_groups (%d)\n",
			  block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
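
/*
 * Adjust the free-space count kept in the Logical Volume Integrity
 * Descriptor.  cnt is declared u32 but callers also pass negative values
 * (e.g. -alloc_count); le32_add_cpu() wraps modulo 2^32, so this works
 * out as a signed adjustment of the on-disk counter.
 */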
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}
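
/*
 * Bitmap geometry: the first bits of the bitmap space are occupied by the
 * spaceBitmapDesc header, so logical block N maps to overall bit
 * N + sizeof(struct spaceBitmapDesc) * 8.  Each bitmap block ("group")
 * covers s_blocksize * 8 bits; e.g. with 2048-byte blocks one group
 * covers 16384 blocks, so block_group = block >> 14 and bit = block % 16384.
 */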
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
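
/*
 * Preallocate up to block_count blocks starting exactly at first_block.
 * The run is claimed one bit at a time and stops at the first block that
 * is already in use; otherwise it continues into the next bitmap group
 * until block_count is satisfied, so the return value may be anything
 * from 0 to block_count contiguous blocks.
 */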
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			first_block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
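
/*
 * Allocate a single block, preferring the goal block.  The search order
 * is: the goal bit itself, then a free bit within the same 64-bit word
 * window, then a fully-free byte found via memscan(), then any free bit
 * in the group, and finally a sweep over all other groups (two passes:
 * byte-granular first, then bit-granular).
 */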
static int udf_bitmap_new_block(struct super_block *sb,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
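		/*
		 * Look for a free (set) bit between the goal and the next
		 * 64-bit word boundary, so the block stays close to the
		 * goal when one is available nearby.
		 */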
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}
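
	/*
	 * search_back: back up over at most 7 immediately preceding free
	 * bits so the allocation begins near the start of a free run,
	 * which helps later allocations stay contiguous.
	 */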
search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
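
/*
 * Table-based free space: instead of a bitmap, the partition keeps an
 * Unallocated Space Entry inode whose extent list enumerates the free
 * runs.  Freeing blocks therefore means merging the freed run into an
 * adjacent extent when possible, or appending a new extent otherwise.
 */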
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;
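
	/*
	 * Walk the extent list looking for an extent that ends exactly at
	 * 'start' (merge the freed run onto its tail) or begins right after
	 * 'end' (merge onto its head).  Extent lengths are capped at
	 * 0x3FFFFFFF bytes, since the top two bits of the length field
	 * encode the extent type.
	 */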
	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}
	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */
		int adsize;
		struct short_ad *sad = NULL;
		struct long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}
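
		/*
		 * Appending needs room for the new descriptor plus a
		 * possible continuation descriptor, hence the 2 * adsize
		 * check below before a new allocation extent block is
		 * chained in.
		 */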
		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			unsigned char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, &epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
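
			/*
			 * The stolen block becomes a new Allocation Extent
			 * Descriptor block, back-linked to its predecessor;
			 * the forward link is written below as an
			 * EXT_NEXT_EXTENT_ALLOCDECS descriptor.
			 */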
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = iinfo->i_ext.i_data + epos.offset -
								adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
						adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)
							oepos.bh->b_data;
					le32_add_cpu(&aed->lengthAllocDescs,
							adsize);
				} else {
					sptr = iinfo->i_ext.i_data +
								epos.offset;
					iinfo->i_lenAlloc += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
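
			/*
			 * UDF 2.00 and later tag new descriptors with
			 * version 3 (NSR03); earlier revisions use
			 * version 2 (NSR02).
			 */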
			if (sbi->s_udfrev >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    3, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    2, 1, epos.block.logicalBlockNum,
					    sizeof(struct tag));
			switch (iinfo->i_alloc_type) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (struct short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (struct long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation =
					cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}
		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, &eloc, elen, 1);

			if (!epos.bh) {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				le32_add_cpu(&aed->lengthAllocDescs, adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
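
/*
 * Table-based preallocation: locate the free extent that starts exactly
 * at first_block and carve up to block_count blocks off its head,
 * shrinking the extent in place or deleting it if fully consumed.
 */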
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
					(etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
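
/*
 * Table-based single-block allocation: scan the whole extent list for
 * the free extent closest to the goal, then take the first block of
 * that extent.
 */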
static int udf_table_new_block(struct super_block *sb,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
.offset
= sizeof(struct unallocSpaceEntry
);
668 epos
.block
= iinfo
->i_location
;
669 epos
.bh
= goal_epos
.bh
= NULL
;
672 (etype
= udf_next_aext(table
, &epos
, &eloc
, &elen
, 1)) != -1) {
673 if (goal
>= eloc
.logicalBlockNum
) {
674 if (goal
< eloc
.logicalBlockNum
+
675 (elen
>> sb
->s_blocksize_bits
))
678 nspread
= goal
- eloc
.logicalBlockNum
-
679 (elen
>> sb
->s_blocksize_bits
);
681 nspread
= eloc
.logicalBlockNum
- goal
;
684 if (nspread
< spread
) {
686 if (goal_epos
.bh
!= epos
.bh
) {
687 brelse(goal_epos
.bh
);
688 goal_epos
.bh
= epos
.bh
;
689 get_bh(goal_epos
.bh
);
691 goal_epos
.block
= epos
.block
;
692 goal_epos
.offset
= epos
.offset
- adsize
;
694 goal_elen
= (etype
<< 30) | elen
;
700 if (spread
== 0xFFFFFFFF) {
701 brelse(goal_epos
.bh
);
702 mutex_unlock(&sbi
->s_alloc_mutex
);
	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
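
/*
 * Public entry points: each partition map advertises, via its flags,
 * whether free space is tracked by an unallocated/freed bitmap or table,
 * and these wrappers dispatch to the matching implementation while
 * keeping the inode's byte accounting in sync.
 */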
void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, map->s_uspace.s_table,
				      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		udf_table_free_blocks(sb, map->s_fspace.s_table,
				      bloc, offset, count);
	}

	if (inode) {
		inode_sub_bytes(inode,
				((sector_t)count) << sb->s_blocksize_bits);
	}
}
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	int allocated;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_uspace.s_bitmap,
						       partition, first_block,
						       block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_uspace.s_table,
						      partition, first_block,
						      block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_fspace.s_bitmap,
						       partition, first_block,
						       block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_fspace.s_table,
						      partition, first_block,
						      block_count);
	else
		return 0;

	if (inode && allocated > 0)
		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
	return allocated;
}
inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	int block;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		block = udf_bitmap_new_block(sb,
					     map->s_uspace.s_bitmap,
					     partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		block = udf_table_new_block(sb,
					    map->s_uspace.s_table,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		block = udf_bitmap_new_block(sb,
					     map->s_fspace.s_bitmap,
					     partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		block = udf_table_new_block(sb,
					    map->s_fspace.s_table,
					    partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}

	if (inode && block)
		inode_add_bytes(inode, sb->s_blocksize);
	return block;
}