/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *	(C) 1999-2001 Ben Fennema
 *	(C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *	02/24/99 blf  Created.
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define udf_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y)
#define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
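
/*
 * Find the next set bit in a little-endian on-disk bitmap.  Each word is
 * converted with leBPL_to_cpup() so the scan also works on big-endian
 * hosts.  Returns the bit number of the first set bit at or after
 * 'offset', or a value >= 'size' if none is found.
 */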
static inline int find_next_one_bit (void * addr, int size, int offset)
{
	uintBPL_t * p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset)
	{
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1))
	{
		if ((tmp = leBPL_to_cpup(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG-size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)
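
/*
 * Read one block of the partition's space bitmap from disk and cache the
 * buffer_head in bitmap->s_block_bitmap[bitmap_nr].
 */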
static int read_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
	{
		retval = -EIO;
	}
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}
static int __load_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups)
	{
		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group])
		return block_group;
	else
	{
		retval = read_block_bitmap(sb, bitmap, block_group, block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}
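
/*
 * Wrapper around __load_block_bitmap() that also verifies that the
 * cached buffer for the returned slot actually exists.
 */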
static inline int load_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
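
/*
 * Free 'count' blocks by setting their bits in the space bitmap (in the
 * UDF space bitmap a set bit means the block is free).  Runs that cross
 * a bitmap group boundary are handled piecewise via the do_more loop;
 * quota and the LVID free-space count are credited per block freed.
 */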
static void udf_bitmap_free_blocks(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head * bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3))
	{
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i=0; i < count; i++)
	{
		if (udf_set_bit(bit + i, bh->b_data))
		{
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
		}
		else
		{
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb))
			{
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow)
	{
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
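
/*
 * Preallocate up to 'block_count' contiguous blocks starting at
 * 'first_block' by clearing their bits.  The scan stops at the first
 * block that is already in use; the number of blocks actually
 * allocated is returned.
 */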
static int udf_bitmap_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	mutex_lock(&sbi->s_alloc_mutex);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		(sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0)
	{
		if (!udf_test_bit(bit, bh->b_data))
			goto out;
		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
			goto out;
		else if (!udf_clear_bit(bit, bh->b_data))
		{
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count --;
		alloc_count ++;
		bit ++;
		block ++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
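
/*
 * Allocate a single block, preferring 'goal'.  The search tries the
 * goal bit itself, then a 64-bit aligned window around it, then a
 * bytewise memscan of the goal group, and finally the remaining
 * groups.  The search_back loop backs up over up to 7 preceding free
 * bits so the allocation tends to sit at the start of a free run.
 */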
static int udf_bitmap_new_block(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit=0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
	{
		bit = block % (sb->s_blocksize << 3);

		if (udf_test_bit(bit, bh->b_data))
		{
			goto got_block;
		}
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;
		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto search_back;
		}
		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto got_block;
		}
	}

	for (i=0; i<(nr_groups*2); i++)
	{
		block_group ++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups)
		{
			ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
			{
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		}
		else
		{
			bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups*2))
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
	if (bit >= sb->s_blocksize << 3)
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	for (i=0; i<7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--);

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data))
	{
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
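
/*
 * Free blocks into an unallocated-space table.  If the freed run abuts
 * an existing extent it is merged in (splitting at the 0x3FFFFFFF
 * extent-length limit); otherwise a new extent is appended, stealing a
 * block from the freed run itself whenever a fresh allocation extent
 * descriptor block is required.
 */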
static void udf_table_free_blocks(struct super_block * sb,
	struct inode * inode,
	struct inode * table,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - there are some error conditions that could occur,
	   but.. oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = oepos.block = UDF_I_LOCATION(table);
	epos.bh = oepos.bh = NULL;

	while (count && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
			start))
		{
			/* The freed run extends this extent forward */
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}
		else if (eloc.logicalBlockNum == (end + 1))
		{
			/* The freed run extends this extent backward */
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -=
					((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh)
		{
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		}
		else
			oepos.offset = epos.offset;
	}

	if (count)
	{
		/* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
		   a new block, and since we hold the super block lock already
		   very bad things would happen :)

		   We copy the behavior of udf_add_aext, but instead of
		   trying to allocate a new block close to the existing one,
		   we just steal a block from the extent we are trying to add.

		   It would be nice if the blocks were close together, but it
		   isn't required.
		*/

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else
		{
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize)
		{
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum ++;
			elen -= sb->s_blocksize;

			if (!(epos.bh = udf_tread(sb,
				udf_get_lb_pblock(sb, epos.block, 0))))
			{
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation = cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize)
			{
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = UDF_I_DATA(inode) + epos.offset -
					udf_file_entry_alloc_offset(inode) +
					UDF_I_LENEATTR(inode) - adsize;
				dptr = epos.bh->b_data + sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) + adsize;
			}
			else
			{
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				sptr = oepos.bh->b_data + epos.offset;
				epos.offset = sizeof(struct allocExtDesc);

				if (oepos.bh)
				{
					aed = (struct allocExtDesc *)oepos.bh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				}
				else
				{
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3, 1,
					epos.block.logicalBlockNum, sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2, 1,
					epos.block.logicalBlockNum, sizeof(tag));
			switch (UDF_I_ALLOCTYPE(table))
			{
				case ICBTAG_FLAG_AD_SHORT:
				{
					sad = (short_ad *)sptr;
					sad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					sad->extPosition = cpu_to_le32(epos.block.logicalBlockNum);
					break;
				}
				case ICBTAG_FLAG_AD_LONG:
				{
					lad = (long_ad *)sptr;
					lad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					lad->extLocation = cpu_to_lelb(epos.block);
					break;
				}
			}
			if (oepos.bh)
			{
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			}
			else
				mark_inode_dirty(table);
		}

		if (elen) /* It's possible that stealing the block emptied the extent */
		{
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh)
			{
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			}
			else
			{
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
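
/*
 * Preallocate blocks from the table extent that starts exactly at
 * 'first_block': the extent is shrunk from the front, or deleted
 * outright when it is consumed entirely.
 */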
static int udf_table_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	struct inode *table, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body when udf_debug() compiles away */
	}

	if (first_block == eloc.logicalBlockNum)
	{
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count)
		{
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc, (etype << 30) | elen, 1);
		}
		else
			udf_delete_aext(table, epos, eloc, (etype << 30) | elen);
	}
	else
		alloc_count = 0;

	brelse(epos.bh);

	if (alloc_count && UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
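
/*
 * Allocate one block from the table.  The extent whose start lies
 * closest to 'goal' is chosen, and the block is taken from its front
 * so extents only ever shrink or disappear and never need splitting.
 */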
static int udf_table_new_block(struct super_block * sb,
	struct inode * inode,
	struct inode *table, uint16_t partition, uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, goal_eloc;
	struct extent_position epos, goal_epos;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal. If we find an exact
	   hit, we stop. Otherwise we keep going until we run out of extents.
	   We store the buffer_head, bloc, and extoffset of the current closest
	   match and use that when we are done.
	*/
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = UDF_I_LOCATION(table);
	epos.bh = goal_epos.bh = NULL;

	while (spread && (etype =
		udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
	{
		if (goal >= eloc.logicalBlockNum)
		{
			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		}
		else
			nspread = eloc.logicalBlockNum - goal;

		if (nspread < spread)
		{
			spread = nspread;
			if (goal_epos.bh != epos.bh)
			{
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF)
	{
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum ++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
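
/*
 * Front end for freeing blocks: dispatch to the bitmap or table variant
 * according to the partition's space-management flags.
 */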
inline void udf_free_blocks(struct super_block * sb,
	struct inode * inode,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			bloc, offset, count);
	}
	else
		return;
}
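
/*
 * Front end for preallocation: dispatch to the bitmap or table variant
 * according to the partition's space-management flags.
 */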
inline int udf_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	uint16_t partition, uint32_t first_block, uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, first_block, block_count);
	}
	else
		return 0;
}
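
/*
 * Front end for single-block allocation: dispatch to the bitmap or
 * table variant according to the partition's space-management flags.
 */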
inline int udf_new_block(struct super_block * sb,
	struct inode * inode,
	uint16_t partition, uint32_t goal, int *err)
{
	int ret;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		ret = udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, goal, err);
		return ret;
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, goal, err);
	}
	else
	{
		*err = -EIO;
		return 0;
	}
}