/*
 * Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1999-2001 Ben Fennema
 * (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 */
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
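
/*
 * The token-pasting chain above selects the endian helper that matches the
 * native word size: on a 64-bit build, leBPL_to_cpup(x) expands through
 * leNUM_to_cpup(64, x) and xleNUM_to_cpup(64, x) into le64_to_cpup(x), and
 * uintBPL_t becomes __le64. This lets the bitmap scanner below walk the
 * on-disk (little-endian) bitmap one native word at a time.
 */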
static inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *)addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		if ((tmp = leBPL_to_cpup(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}
#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)
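
/*
 * Note the polarity: in a UDF space bitmap a *set* bit marks a free block.
 * That is why the helpers above search for one bits (find_next_one_bit
 * finishes with ffz(~tmp)), why freeing a block sets its bit, and why
 * allocating a block clears it.
 */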
static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}
static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);
	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
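
/*
 * A quick sanity check of the group arithmetic used below: the on-disk
 * bitmap begins with a struct spaceBitmapDesc header, so block numbers are
 * biased by its size in bits (sizeof(...) << 3). Each bitmap block then
 * covers s_blocksize * 8 block bits; with 2048-byte blocks that is 16384
 * blocks per group, giving block_group = block >> (s_blocksize_bits + 3)
 * (i.e. >> 14) and bit = block % (s_blocksize << 3) (i.e. % 16384).
 */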
static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   kernel_lb_addr bloc, uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3)) {
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++) {
		if (udf_set_bit(bit + i, bh->b_data)) {
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
		} else {
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb)) {
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow) {
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		     (sizeof(struct spaceBitmapDesc) << 3) +
		     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0) {
		if (!udf_test_bit(bit, bh->b_data)) {
			goto out;
		} else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
			goto out;
		} else if (!udf_clear_bit(bit, bh->b_data)) {
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count--;
		alloc_count++;
		bit++;
		block++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
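
/*
 * The allocator below tries progressively wider strategies: first the goal
 * bit itself, then a one bit within the goal's 64-bit window, then a
 * byte-at-a-time memscan() for a fully free byte, then a plain bit search,
 * and finally the same scans over the remaining groups. Once a bit is
 * found, search_back walks back up to 7 bits so allocations tend to start
 * at the beginning of a free run.
 */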
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--)
		; /* empty loop */

got_block:
	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	*err = 0;
	mutex_unlock(&sbi->s_alloc_mutex);
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
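
/*
 * In the table-based routines below, an extent length carries the extent
 * type in its top two bits, leaving 30 bits for the byte length. An extent
 * can therefore describe at most 0x3FFFFFFF bytes, and since lengths must
 * stay block-aligned, the merge logic caps a full extent at
 * (0x40000000 - s_blocksize) bytes before spilling the rest into a new one.
 */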
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  kernel_lb_addr bloc, uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - There are some error conditions that could occur,
	   but.. oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = UDF_I_LOCATION(table);
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)) {
			/* The freed run starts right after this extent:
			   merge it onto the tail, respecting the 30-bit limit. */
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			/* The freed run ends right where this extent begins:
			   extend the extent downward. */
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits)) {
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) | (elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
		 * a new block, and since we hold the super block lock already
		 * very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
			adsize = sizeof(short_ad);
		} else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
			adsize = sizeof(long_ad);
		} else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			if (!(epos.bh = udf_tread(sb, udf_get_lb_pblock(sb, epos.block, 0)))) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = UDF_I_DATA(inode) + epos.offset -
					udf_file_entry_alloc_offset(inode) +
					UDF_I_LENEATTR(inode) - adsize;
				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) + adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				sptr = oepos.bh->b_data + epos.offset;
				epos.offset = sizeof(struct allocExtDesc);

				if (oepos.bh) {
					aed = (struct allocExtDesc *)oepos.bh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				} else {
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
					    epos.block.logicalBlockNum, sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
					    epos.block.logicalBlockNum, sizeof(tag));

			switch (UDF_I_ALLOCTYPE(table)) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		if (elen) { /* It's possible that stealing the block emptied the extent */
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh) {
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count)) {
			alloc_count = 0;
		} else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
		} else {
			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
		}
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count && UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal. If we find an exact hit,
	   we stop. Otherwise we keep going till we run out of extents.
	   We store the buffer_head, bloc, and extoffset of the current closest
	   match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (UDF_SB_LVIDBH(sb)) {
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
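
/*
 * The three wrappers below simply dispatch on the partition's flags to the
 * bitmap- or table-backed implementation, preferring the unallocated-space
 * descriptor (s_uspace) over the freed-space one (s_fspace).
 */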
inline void udf_free_blocks(struct super_block *sb,
			    struct inode *inode,
			    kernel_lb_addr bloc, uint32_t offset,
			    uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
					      bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
					     bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
					      bloc, offset, count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
					     bloc, offset, count);
	} else {
		return;
	}
}
inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
						  UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
						  partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
						 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
						 partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_prealloc_blocks(sb, inode,
						  UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
						  partition, first_block, block_count);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_prealloc_blocks(sb, inode,
						 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
						 partition, first_block, block_count);
	} else {
		return 0;
	}
}
inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	int ret;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
		ret = udf_bitmap_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
					   partition, goal, err);
		return ret;
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
					   partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_new_block(sb, inode,
					    UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
					    partition, goal, err);
	} else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_new_block(sb, inode,
					   UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
					   partition, goal, err);
	} else {
		*err = -EIO;
		return 0;
	}
}
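
/*
 * Typical call flow (a sketch; the exact call sites live elsewhere in the
 * UDF driver): a writer asks for a block near its last one with
 *
 *	newblock = udf_new_block(sb, inode, partition, goal, &err);
 *
 * and returns it on truncate or an error path with
 *
 *	udf_free_blocks(sb, inode, bloc, 0, 1);
 *
 * Both paths serialize on sbi->s_alloc_mutex and keep the logical volume
 * integrity descriptor's free-space table in step with the on-disk state.
 */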