/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit	__test_and_clear_bit_le
#define udf_set_bit	__test_and_set_bit_le
#define udf_test_bit	test_bit_le
#define udf_find_next_one_bit	find_next_bit_le
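
/*
 * The on-disk UDF space bitmap is little-endian; these wrappers map the
 * accessors used below onto the *_le bit helpers from <linux/bitops.h>.
 */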

static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	struct kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n",
			  block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group])
		return block_group;

	retval = read_block_bitmap(sb, bitmap, block_group, block_group);
	if (retval < 0)
		return retval;

	return block_group;
}

static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
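
/*
 * Adjust the free-space count for @partition in the Logical Volume
 * Integrity Descriptor. @cnt is declared u32, but callers also pass
 * negative deltas (e.g. -alloc_count); two's-complement wraparound makes
 * le32_add_cpu() subtract as intended.
 */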
static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;

	if (!sbi->s_lvid_bh)
		return;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	le32_add_cpu(&lvid->freeSpaceTable[partition], cnt);
	udf_updated_lvid(sb);
}

static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct udf_bitmap *bitmap,
				   struct kernel_lb_addr *bloc,
				   uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	struct udf_part_map *partmap;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
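	/*
	 * Sanity-check the run being freed: the first test catches u32
	 * wraparound in logicalBlockNum + count, the second rejects runs
	 * extending past the end of the partition.
	 */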
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}
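
	/*
	 * On disk the space bitmap begins with a spaceBitmapDesc header, so
	 * block numbers are offset by its size in bits to get bit positions
	 * within the bitmap.
	 */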
	block = bloc->logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
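
		/*
		 * In the UDF space bitmap a set bit means "free": freeing
		 * sets bits, allocation clears them, and a bit found
		 * already set here indicates a likely double free.
		 */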
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			}
		}
		udf_add_free_space(sb, sbi->s_partition, count);
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
}
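
/*
 * Claim up to @block_count blocks starting exactly at @first_block. The
 * scan stops at the first block that is already in use, so the return
 * value is the number of contiguous blocks actually allocated (possibly
 * zero).
 */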
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_clear_bit(bit, bh->b_data))
				goto out;
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
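
/*
 * Allocate a single block, preferring @goal. Within the goal's bitmap
 * group the search tries, in order: the goal bit itself, the rest of the
 * goal's 64-bit word, the first fully-free byte (0xFF, i.e. eight free
 * blocks) after the goal, and any free bit after the goal. Failing that,
 * up to two round-robin passes are made over all groups: the first looks
 * for a fully-free byte, the second for any free bit.
 */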
static int udf_bitmap_new_block(struct super_block *sb,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;
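
		/* Next, try the remainder of the goal's 64-bit word. */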
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit(bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}

	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}
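
	/*
	 * The byte-granular scans above may land in the middle of a run of
	 * free bits; walk back up to 7 bits so the allocation starts at the
	 * beginning of the run.
	 */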
search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:
	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	udf_add_free_space(sb, partition, -1);
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
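
/*
 * Table-based partitions track free space as extents in an unallocated
 * space entry, so freeing a run means merging it into an adjacent free
 * extent where possible, or recording a new extent otherwise.
 */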
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *table,
				  struct kernel_lb_addr *bloc,
				  uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *partmap;
	uint32_t start, end;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
	if (bloc->logicalBlockNum + count < count ||
	    (bloc->logicalBlockNum + count) > partmap->s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc->logicalBlockNum, 0,
			  bloc->logicalBlockNum, count,
			  partmap->s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	udf_add_free_space(sb, sbi->s_partition, count);

	start = bloc->logicalBlockNum + offset;
	end = bloc->logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;
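
	/*
	 * Look for a free extent to merge the run into: one that ends right
	 * at @start (front merge) or one that begins at @end + 1 (back
	 * merge). Extent lengths carry the extent type in their top two
	 * bits, so the usable length is capped at 0x3FFFFFFF bytes; a merge
	 * that would overflow the cap absorbs only as much of the freed run
	 * as fits.
	 */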
	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
			(elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
					(count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					(count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, &eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(struct short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(struct long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			/* Steal a block from the extent being freed */
			udf_setup_indirect_aext(table, eloc.logicalBlockNum,
						&epos);

			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen)
			__udf_add_aext(table, &epos, &eloc, elen, 1);
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
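
/*
 * Table-based preallocation: find the free extent that starts exactly at
 * @first_block and carve up to @block_count blocks off its front,
 * deleting the extent outright when it is fully consumed.
 */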
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	struct kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, &eloc,
				       (etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count)
		udf_add_free_space(sb, partition, -alloc_count);
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}

static int udf_table_new_block(struct super_block *sb,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
				   (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					  (elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */

	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (goal_elen)
		udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	udf_add_free_space(sb, partition, -1);

	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
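
/*
 * The exported entry points below dispatch on the partition flags: free
 * space may live in an "unallocated" or a "freed" space map, each of
 * which is implemented either as a bitmap or as an extent table.
 */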

void udf_free_blocks(struct super_block *sb, struct inode *inode,
		     struct kernel_lb_addr *bloc, uint32_t offset,
		     uint32_t count)
{
	uint16_t partition = bloc->partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		udf_table_free_blocks(sb, map->s_uspace.s_table,
				      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
				       bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		udf_table_free_blocks(sb, map->s_fspace.s_table,
				      bloc, offset, count);
	}

	if (inode) {
		inode_sub_bytes(inode,
				((sector_t)count) << sb->s_blocksize_bits);
	}
}

inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	int allocated;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_uspace.s_bitmap,
						       partition, first_block,
						       block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_uspace.s_table,
						      partition, first_block,
						      block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		allocated = udf_bitmap_prealloc_blocks(sb,
						       map->s_fspace.s_bitmap,
						       partition, first_block,
						       block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		allocated = udf_table_prealloc_blocks(sb,
						      map->s_fspace.s_table,
						      partition, first_block,
						      block_count);
	else
		return 0;

	if (inode && allocated > 0)
		inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
	return allocated;
}

inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	int block;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		block = udf_bitmap_new_block(sb,
					     map->s_uspace.s_bitmap,
					     partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		block = udf_table_new_block(sb,
					    map->s_uspace.s_table,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		block = udf_bitmap_new_block(sb,
					     map->s_fspace.s_bitmap,
					     partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		block = udf_table_new_block(sb,
					    map->s_fspace.s_table,
					    partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}

	if (inode && block)
		inode_add_bytes(inode, sb->s_blocksize);
	return block;
}