/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * CONTACTS
 *	E-mail regarding any portion of the Linux UDF file system should be
 *	directed to the development team mailing list (run by majordomo):
 *		linux_udf@hpesjro.fc.hp.com
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <asm/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define udf_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y)
#define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x
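
/*
 * The extra level of macro indirection above makes the preprocessor
 * expand BITS_PER_LONG before the token paste happens: on a 64-bit
 * build, for example, leBPL_to_cpup(p) becomes (le64_to_cpup(p)) and
 * uintBPL_t becomes __le64.
 */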

extern inline int find_next_one_bit (void * addr, int size, int offset)
{
	uintBPL_t * p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset)
	{
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1))
	{
		if ((tmp = leBPL_to_cpup(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG-size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)
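
/*
 * find_next_one_bit() scans a little-endian bitmap of 'size' bits and
 * returns the index of the first set bit at or after 'offset' (or a
 * value >= size when no set bit remains); find_first_one_bit() simply
 * starts that scan at bit 0.
 */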

static int read_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
	{
		retval = -EIO;
	}
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

static int __load_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups)
	{
		udf_debug("block_group (%d) > nr_groups (%d)\n", block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group])
		return block_group;
	else
	{
		retval = read_block_bitmap(sb, bitmap, block_group, block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}

static inline int load_block_bitmap(struct super_block * sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
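
/*
 * Together these helpers implement a small cache of bitmap blocks:
 * read_block_bitmap() performs the actual read, __load_block_bitmap()
 * checks the group number and fills the cache slot on a miss, and
 * load_block_bitmap() is the checked front end callers use to obtain
 * the buffer_head for a block group.
 */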

static void udf_bitmap_free_blocks(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct buffer_head * bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	lock_super(sb);
	if (bloc.logicalBlockNum < 0 ||
		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3))
	{
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i=0; i < count; i++)
	{
		if (udf_set_bit(bit + i, bh->b_data))
		{
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
		}
		else
		{
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb))
			{
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow)
	{
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	unlock_super(sb);
	return;
}
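
/*
 * Note the polarity: in a UDF space bitmap a set bit means "free", so
 * freeing blocks sets bits (udf_set_bit) and allocating clears them
 * (udf_clear_bit) - the opposite of the convention in ext2, from which
 * the bit helpers are borrowed.
 */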

static int udf_bitmap_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	lock_super(sb);

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		(sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0)
	{
		if (!udf_test_bit(bit, bh->b_data))
			goto out;
		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
			goto out;
		else if (!udf_clear_bit(bit, bh->b_data))
		{
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count--;
		alloc_count++;
		bit++;
		block++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	unlock_super(sb);
	return alloc_count;
}
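
/*
 * Bitmap preallocation is best-effort: it reserves contiguous free
 * blocks starting at first_block and stops at the first block already
 * in use, so anywhere from 0 to block_count blocks may be returned.
 */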

static int udf_bitmap_new_block(struct super_block * sb,
	struct inode * inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err)
{
	int newbit, bit=0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	lock_super(sb);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
	{
		bit = block % (sb->s_blocksize << 3);

		if (udf_test_bit(bit, bh->b_data))
		{
			goto got_block;
		}
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;
		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto search_back;
		}
		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto got_block;
		}
	}

	for (i=0; i<(nr_groups*2); i++)
	{
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups)
		{
			ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
			{
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		}
		else
		{
			bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups*2))
	{
		unlock_super(sb);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
	if (bit >= sb->s_blocksize << 3)
	{
		unlock_super(sb);
		return 0;
	}

search_back:
	for (i=0; i<7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--);

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		unlock_super(sb);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data))
	{
		udf_debug("bit already cleared for block %d\n", bit);
		/* Give back the quota charged above before retrying. */
		if (inode)
			DQUOT_FREE_BLOCK(inode, 1);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	*err = 0;
	unlock_super(sb);
	return newblock;

error_return:
	*err = -EIO;
	unlock_super(sb);
	return 0;
}
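
/*
 * Allocation strategy of the search above: try the goal bit itself,
 * then the 64-bit window ending at the next 64-bit boundary, then scan
 * for a fully-free byte (memscan for 0xFF, i.e. eight free blocks),
 * then settle for any free bit, widening the search to the other block
 * groups. search_back finally slides up to seven bits backwards so the
 * allocation tends to start at the beginning of a free run.
 */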

static void udf_table_free_blocks(struct super_block * sb,
	struct inode * inode,
	struct inode * table,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	uint32_t start, end;
	uint32_t nextoffset, oextoffset, elen;
	kernel_lb_addr nbloc, obloc, eloc;
	struct buffer_head *obh, *nbh;
	int8_t etype;

	lock_super(sb);
	if (bloc.logicalBlockNum < 0 ||
		(bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - There are some error conditions that could occur,
	   but.. oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)])+count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	oextoffset = nextoffset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	obloc = nbloc = UDF_I_LOCATION(table);

	obh = nbh = NULL;

	while (count && (etype =
		udf_next_aext(table, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		if (((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
			start))
		{
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
		}
		else if (eloc.logicalBlockNum == (end + 1))
		{
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -=
					((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
		}

		if (nbh != obh)
		{
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = 0;
		}
		else
			oextoffset = nextoffset;
	}

	if (count)
	{
		/* NOTE: we CANNOT use udf_add_aext here, as it can try to allocate
		   a new block, and since we hold the super block lock already
		   very bad things would happen :)

		   We copy the behavior of udf_add_aext, but instead of
		   trying to allocate a new block close to the existing one,
		   we just steal a block from the extent we are trying to add.

		   It would be nice if the blocks were close together, but it
		   doesn't really matter */

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else
		{
			udf_release_data(obh);
			udf_release_data(nbh);
			goto error_return;
		}

		if (nextoffset + (2 * adsize) > sb->s_blocksize)
		{
			char *sptr, *dptr;
			int loffset;

			udf_release_data(obh);
			obh = nbh;
			obloc = nbloc;
			oextoffset = nextoffset;

			/* Steal a block from the extent being freed */
			nbloc.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			if (!(nbh = udf_tread(sb,
				udf_get_lb_pblock(sb, nbloc, 0))))
			{
				udf_release_data(obh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(nbh->b_data);
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
			if (nextoffset + adsize > sb->s_blocksize)
			{
				loffset = nextoffset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				if (!obh)
					sptr = UDF_I_DATA(inode) + nextoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode) - adsize;
				else
					sptr = obh->b_data + nextoffset - adsize;
				dptr = nbh->b_data + sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				nextoffset = sizeof(struct allocExtDesc) + adsize;
			}
			else
			{
				loffset = nextoffset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				sptr = obh->b_data + nextoffset;
				nextoffset = sizeof(struct allocExtDesc);

				if (obh)
				{
					aed = (struct allocExtDesc *)obh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				}
				else
				{
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
					nbloc.logicalBlockNum, sizeof(tag));
			else
				udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
					nbloc.logicalBlockNum, sizeof(tag));
			switch (UDF_I_ALLOCTYPE(table))
			{
				case ICBTAG_FLAG_AD_SHORT:
				{
					sad = (short_ad *)sptr;
					sad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					sad->extPosition = cpu_to_le32(nbloc.logicalBlockNum);
					break;
				}
				case ICBTAG_FLAG_AD_LONG:
				{
					lad = (long_ad *)sptr;
					lad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					lad->extLocation = cpu_to_lelb(nbloc);
					break;
				}
			}
			if (obh)
			{
				udf_update_tag(obh->b_data, loffset);
				mark_buffer_dirty(obh);
			}
			else
				mark_inode_dirty(table);
		}

		if (elen) /* It's possible that stealing the block emptied the extent */
		{
			udf_write_aext(table, nbloc, &nextoffset, eloc, elen, nbh, 1);

			if (!nbh)
			{
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			}
			else
			{
				aed = (struct allocExtDesc *)nbh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				udf_update_tag(nbh->b_data, nextoffset);
				mark_buffer_dirty(nbh);
			}
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);

error_return:
	sb->s_dirt = 1;
	unlock_super(sb);
	return;
}
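
/*
 * The function above handles three cases: the freed run is merged into
 * an extent that ends immediately before it, merged into an extent that
 * begins immediately after it, or recorded as a brand new extent. In
 * the last case, when the current descriptor block is full, one of the
 * freed blocks is stolen to hold a new allocation extent descriptor.
 */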

static int udf_table_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	struct inode *table, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	int alloc_count = 0;
	uint32_t extoffset, elen, adsize;
	kernel_lb_addr bloc, eloc;
	struct buffer_head *bh;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	lock_super(sb);

	extoffset = sizeof(struct unallocSpaceEntry);
	bloc = UDF_I_LOCATION(table);

	bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum && (etype =
		udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
	{
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum)
	{
		extoffset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count)
		{
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, bloc, &extoffset, eloc, (etype << 30) | elen, bh, 1);
		}
		else
			udf_delete_aext(table, bloc, extoffset, eloc, (etype << 30) | elen, bh);
	}
	else
		alloc_count = 0;

	udf_release_data(bh);

	if (alloc_count && UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	unlock_super(sb);
	return alloc_count;
}
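
/*
 * Table-backed preallocation only succeeds when an extent starting
 * exactly at first_block exists; that extent is then shortened from the
 * front, or deleted outright when the request consumes it entirely.
 */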

static int udf_table_new_block(struct super_block * sb,
	struct inode * inode,
	struct inode *table, uint16_t partition, uint32_t goal, int *err)
{
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t extoffset, goal_extoffset, elen, goal_elen = 0;
	kernel_lb_addr bloc, goal_bloc, eloc, goal_eloc;
	struct buffer_head *bh, *goal_bh;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	lock_super(sb);

	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/* We search for the closest matching block to goal. If we find an exact hit,
	   we stop. Otherwise we keep going till we run out of extents.
	   We store the buffer_head, bloc, and extoffset of the current closest
	   match and use that when we are done.
	*/

	extoffset = sizeof(struct unallocSpaceEntry);
	bloc = UDF_I_LOCATION(table);

	goal_bh = bh = NULL;

	while (spread && (etype =
		udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
	{
		if (goal >= eloc.logicalBlockNum)
		{
			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		}
		else
			nspread = eloc.logicalBlockNum - goal;

		if (nspread < spread)
		{
			spread = nspread;
			if (goal_bh != bh)
			{
				udf_release_data(goal_bh);
				goal_bh = bh;
				atomic_inc(&goal_bh->b_count);
			}
			goal_bloc = bloc;
			goal_extoffset = extoffset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	udf_release_data(bh);

	if (spread == 0xFFFFFFFF)
	{
		udf_release_data(goal_bh);
		unlock_super(sb);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		udf_release_data(goal_bh);
		unlock_super(sb);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, goal_bloc, &goal_extoffset, goal_eloc, goal_elen, goal_bh, 1);
	else
		udf_delete_aext(table, goal_bloc, goal_extoffset, goal_eloc, goal_elen, goal_bh);
	udf_release_data(goal_bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition])-1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	unlock_super(sb);
	*err = 0;
	return newblock;
}
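
/*
 * Taking newblock from the front of the chosen extent (and bumping
 * goal_eloc/goal_elen above) means extents only ever shrink or get
 * deleted; allocating from the middle would instead split one extent
 * into two and force an insertion into the table.
 */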

inline void udf_free_blocks(struct super_block * sb,
	struct inode * inode,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			bloc, offset, count);
	}
	else
		return;
}

inline int udf_prealloc_blocks(struct super_block * sb,
	struct inode * inode,
	uint16_t partition, uint32_t first_block, uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, first_block, block_count);
	}
	else
		return 0;
}

inline int udf_new_block(struct super_block * sb,
	struct inode * inode,
	uint16_t partition, uint32_t goal, int *err)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, goal, err);
	}
	else
	{
		*err = -EIO;
		return 0;
	}
}
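
/*
 * Illustrative only (not part of the original file): a caller such as
 * the inode allocation path would typically drive the two entry points
 * above roughly like this, where 'goal' is a hint near already
 * allocated data and 'bloc' describes the block being released:
 *
 *	int err = 0;
 *	uint32_t newblock = udf_new_block(sb, inode,
 *		UDF_I_LOCATION(inode).partitionReferenceNum, goal, &err);
 *	if (!newblock)
 *		return err;
 *	...
 *	udf_free_blocks(sb, inode, bloc, 0, 1);
 */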