1 /* $NetBSD: udf_allocation.c,v 1.26 2009/06/24 17:09:13 reinoud Exp $ */
4 * Copyright (c) 2006, 2008 Reinoud Zandijk
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 #include <sys/cdefs.h>
31 __KERNEL_RCSID(0, "$NetBSD: udf_allocation.c,v 1.26 2009/06/24 17:09:13 reinoud Exp $");
35 #if defined(_KERNEL_OPT)
36 #include "opt_compat_netbsd.h"
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/sysctl.h>
43 #include <sys/namei.h>
45 #include <sys/kernel.h>
46 #include <sys/vnode.h>
47 #include <miscfs/genfs/genfs_node.h>
48 #include <sys/mount.h>
51 #include <sys/device.h>
52 #include <sys/disklabel.h>
53 #include <sys/ioctl.h>
54 #include <sys/malloc.h>
55 #include <sys/dirent.h>
58 #include <sys/kauth.h>
59 #include <sys/kthread.h>
60 #include <dev/clock_subr.h>
62 #include <fs/udf/ecma167-udf.h>
63 #include <fs/udf/udf_mount.h>
67 #include "udf_bswap.h"
70 #define VTOI(vnode) ((struct udf_node *) vnode->v_data)
72 static void udf_record_allocation_in_node(struct udf_mount
*ump
,
73 struct buf
*buf
, uint16_t vpart_num
, uint64_t *mapping
,
74 struct long_ad
*node_ad_cpy
);
77 * IDEA/BUSY: Each udf_node gets its own extentwalker state for all operations;
78 * this will hopefully/likely reduce O(nlog(n)) to O(1) for most functionality
79 * since actions are most likely sequencial and thus seeking doesn't need
80 * searching for the same or adjacent position again.
83 /* --------------------------------------------------------------------- */
88 udf_node_dump(struct udf_node
*udf_node
) {
89 struct file_entry
*fe
;
90 struct extfile_entry
*efe
;
91 struct icb_tag
*icbtag
;
94 uint32_t icbflags
, addr_type
;
98 int lb_size
, eof
, slot
;
100 if ((udf_verbose
& UDF_DEBUG_NODEDUMP
) == 0)
103 lb_size
= udf_rw32(udf_node
->ump
->logical_vol
->lb_size
);
108 icbtag
= &fe
->icbtag
;
109 inflen
= udf_rw64(fe
->inf_len
);
111 icbtag
= &efe
->icbtag
;
112 inflen
= udf_rw64(efe
->inf_len
);
115 icbflags
= udf_rw16(icbtag
->flags
);
116 addr_type
= icbflags
& UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
118 printf("udf_node_dump %p :\n", udf_node
);
120 if (addr_type
== UDF_ICB_INTERN_ALLOC
) {
121 printf("\tIntern alloc, len = %"PRIu64
"\n", inflen
);
125 printf("\tInflen = %"PRIu64
"\n", inflen
);
130 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
133 part_num
= udf_rw16(s_ad
.loc
.part_num
);
134 lb_num
= udf_rw32(s_ad
.loc
.lb_num
);
135 len
= udf_rw32(s_ad
.len
);
136 flags
= UDF_EXT_FLAGS(len
);
137 len
= UDF_EXT_LEN(len
);
141 printf("part %d, ", part_num
);
142 printf("lb_num %d, len %d", lb_num
, len
);
144 printf(", flags %d", flags
>>30);
147 if (flags
== UDF_EXT_REDIRECT
) {
148 printf("\n\textent END\n\tallocation extent\n\t\t");
153 printf("\n\tl_ad END\n\n");
156 #define udf_node_dump(a)
161 udf_assert_allocated(struct udf_mount
*ump
, uint16_t vpart_num
,
162 uint32_t lb_num
, uint32_t num_lb
)
164 struct udf_bitmap
*bitmap
;
165 struct part_desc
*pdesc
;
173 DPRINTF(PARANOIA
, ("udf_assert_allocated: check virt lbnum %d "
174 "part %d + %d sect\n", lb_num
, vpart_num
, num_lb
));
176 /* get partition backing up this vpart_num */
177 pdesc
= ump
->partitions
[ump
->vtop
[vpart_num
]];
179 switch (ump
->vtop_tp
[vpart_num
]) {
180 case UDF_VTOP_TYPE_PHYS
:
181 case UDF_VTOP_TYPE_SPARABLE
:
182 /* free space to freed or unallocated space bitmap */
183 ptov
= udf_rw32(pdesc
->start_loc
);
184 phys_part
= ump
->vtop
[vpart_num
];
186 /* use unallocated bitmap */
187 bitmap
= &ump
->part_unalloc_bits
[phys_part
];
189 /* if no bitmaps are defined, bail out */
190 if (bitmap
->bits
== NULL
)
194 KASSERT(bitmap
->bits
);
196 bpos
= bitmap
->bits
+ lb_num
/8;
200 DPRINTF(PARANOIA
, ("XXX : check %d, %p, bit %d\n",
202 KASSERT(bitmap
->bits
+ lb_num
/8 == bpos
);
203 if (*bpos
& bitval
) {
204 printf("\tlb_num %d is NOT marked busy\n",
218 case UDF_VTOP_TYPE_VIRT
:
219 /* TODO check space */
220 KASSERT(num_lb
== 1);
222 case UDF_VTOP_TYPE_META
:
223 /* TODO check space in the metadata bitmap */
225 /* not implemented */
232 udf_node_sanity_check(struct udf_node
*udf_node
,
233 uint64_t *cnt_inflen
, uint64_t *cnt_logblksrec
)
236 struct file_entry
*fe
;
237 struct extfile_entry
*efe
;
238 struct icb_tag
*icbtag
;
240 uint64_t inflen
, logblksrec
;
241 uint32_t icbflags
, addr_type
;
242 uint32_t len
, lb_num
, l_ea
, l_ad
, max_l_ad
;
245 int dscr_size
, lb_size
, flags
, whole_lb
;
248 // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
251 udf_node_dump(udf_node
);
253 lb_size
= udf_rw32(udf_node
->ump
->logical_vol
->lb_size
);
258 dscr
= (union dscrptr
*) fe
;
259 icbtag
= &fe
->icbtag
;
260 inflen
= udf_rw64(fe
->inf_len
);
261 dscr_size
= sizeof(struct file_entry
) -1;
262 logblksrec
= udf_rw64(fe
->logblks_rec
);
263 l_ad
= udf_rw32(fe
->l_ad
);
264 l_ea
= udf_rw32(fe
->l_ea
);
266 dscr
= (union dscrptr
*) efe
;
267 icbtag
= &efe
->icbtag
;
268 inflen
= udf_rw64(efe
->inf_len
);
269 dscr_size
= sizeof(struct extfile_entry
) -1;
270 logblksrec
= udf_rw64(efe
->logblks_rec
);
271 l_ad
= udf_rw32(efe
->l_ad
);
272 l_ea
= udf_rw32(efe
->l_ea
);
274 data_pos
= (uint8_t *) dscr
+ dscr_size
+ l_ea
;
275 max_l_ad
= lb_size
- dscr_size
- l_ea
;
276 icbflags
= udf_rw16(icbtag
->flags
);
277 addr_type
= icbflags
& UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
279 /* check if tail is zero */
280 DPRINTF(PARANOIA
, ("Sanity check blank tail\n"));
281 for (i
= l_ad
; i
< max_l_ad
; i
++) {
282 if (data_pos
[i
] != 0)
283 printf( "sanity_check: violation: node byte %d "
284 "has value %d\n", i
, data_pos
[i
]);
291 if (addr_type
== UDF_ICB_INTERN_ALLOC
) {
292 KASSERT(l_ad
<= max_l_ad
);
293 KASSERT(l_ad
== inflen
);
294 *cnt_inflen
= inflen
;
302 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
305 KASSERT(whole_lb
== 1);
307 part_num
= udf_rw16(s_ad
.loc
.part_num
);
308 lb_num
= udf_rw32(s_ad
.loc
.lb_num
);
309 len
= udf_rw32(s_ad
.len
);
310 flags
= UDF_EXT_FLAGS(len
);
311 len
= UDF_EXT_LEN(len
);
313 if (flags
!= UDF_EXT_REDIRECT
) {
315 if (flags
== UDF_EXT_ALLOCATED
) {
316 *cnt_logblksrec
+= (len
+ lb_size
-1) / lb_size
;
319 KASSERT(len
== lb_size
);
321 /* check allocation */
322 if (flags
== UDF_EXT_ALLOCATED
)
323 udf_assert_allocated(udf_node
->ump
, part_num
, lb_num
,
324 (len
+ lb_size
- 1) / lb_size
);
327 whole_lb
= ((len
% lb_size
) == 0);
331 /* rest should be zero (ad_off > l_ad < max_l_ad - adlen) */
333 KASSERT(*cnt_inflen
== inflen
);
334 KASSERT(*cnt_logblksrec
== logblksrec
);
336 // KASSERT(mutex_owned(&udf_node->ump->allocate_mutex));
340 udf_node_sanity_check(struct udf_node
*udf_node
,
341 uint64_t *cnt_inflen
, uint64_t *cnt_logblksrec
) {
342 struct file_entry
*fe
;
343 struct extfile_entry
*efe
;
344 struct icb_tag
*icbtag
;
345 uint64_t inflen
, logblksrec
;
346 int dscr_size
, lb_size
;
348 lb_size
= udf_rw32(udf_node
->ump
->logical_vol
->lb_size
);
353 icbtag
= &fe
->icbtag
;
354 inflen
= udf_rw64(fe
->inf_len
);
355 dscr_size
= sizeof(struct file_entry
) -1;
356 logblksrec
= udf_rw64(fe
->logblks_rec
);
358 icbtag
= &efe
->icbtag
;
359 inflen
= udf_rw64(efe
->inf_len
);
360 dscr_size
= sizeof(struct extfile_entry
) -1;
361 logblksrec
= udf_rw64(efe
->logblks_rec
);
363 *cnt_logblksrec
= logblksrec
;
364 *cnt_inflen
= inflen
;
368 /* --------------------------------------------------------------------- */
371 udf_calc_freespace(struct udf_mount
*ump
, uint64_t *sizeblks
, uint64_t *freeblks
)
373 struct logvol_int_desc
*lvid
;
374 uint32_t *pos1
, *pos2
;
375 int vpart
, num_vpart
;
377 lvid
= ump
->logvol_integrity
;
378 *freeblks
= *sizeblks
= 0;
381 * Sequentials media report free space directly (CD/DVD/BD-R), for the
382 * other media we need the logical volume integrity.
384 * We sum all free space up here regardless of type.
388 num_vpart
= udf_rw32(lvid
->num_part
);
390 if (ump
->discinfo
.mmc_cur
& MMC_CAP_SEQUENTIAL
) {
391 /* use track info directly summing if there are 2 open */
392 /* XXX assumption at most two tracks open */
393 *freeblks
= ump
->data_track
.free_blocks
;
394 if (ump
->data_track
.tracknr
!= ump
->metadata_track
.tracknr
)
395 *freeblks
+= ump
->metadata_track
.free_blocks
;
396 *sizeblks
= ump
->discinfo
.last_possible_lba
;
398 /* free and used space for mountpoint based on logvol integrity */
399 for (vpart
= 0; vpart
< num_vpart
; vpart
++) {
400 pos1
= &lvid
->tables
[0] + vpart
;
401 pos2
= &lvid
->tables
[0] + num_vpart
+ vpart
;
402 if (udf_rw32(*pos1
) != (uint32_t) -1) {
403 *freeblks
+= udf_rw32(*pos1
);
404 *sizeblks
+= udf_rw32(*pos2
);
408 /* adjust for accounted uncommitted blocks */
409 for (vpart
= 0; vpart
< num_vpart
; vpart
++)
410 *freeblks
-= ump
->uncommitted_lbs
[vpart
];
412 if (*freeblks
> UDF_DISC_SLACK
) {
413 *freeblks
-= UDF_DISC_SLACK
;
421 udf_calc_vpart_freespace(struct udf_mount
*ump
, uint16_t vpart_num
, uint64_t *freeblks
)
423 struct logvol_int_desc
*lvid
;
426 lvid
= ump
->logvol_integrity
;
430 * Sequentials media report free space directly (CD/DVD/BD-R), for the
431 * other media we need the logical volume integrity.
433 * We sum all free space up here regardless of type.
437 if (ump
->discinfo
.mmc_cur
& MMC_CAP_SEQUENTIAL
) {
438 /* XXX assumption at most two tracks open */
439 if (vpart_num
== ump
->data_part
) {
440 *freeblks
= ump
->data_track
.free_blocks
;
442 *freeblks
= ump
->metadata_track
.free_blocks
;
445 /* free and used space for mountpoint based on logvol integrity */
446 pos1
= &lvid
->tables
[0] + vpart_num
;
447 if (udf_rw32(*pos1
) != (uint32_t) -1)
448 *freeblks
+= udf_rw32(*pos1
);
451 /* adjust for accounted uncommitted blocks */
452 *freeblks
-= ump
->uncommitted_lbs
[vpart_num
];
455 /* --------------------------------------------------------------------- */
458 udf_translate_vtop(struct udf_mount
*ump
, struct long_ad
*icb_loc
,
459 uint32_t *lb_numres
, uint32_t *extres
)
461 struct part_desc
*pdesc
;
462 struct spare_map_entry
*sme
;
463 struct long_ad s_icb_loc
;
464 uint64_t foffset
, end_foffset
;
465 uint32_t lb_size
, len
;
466 uint32_t lb_num
, lb_rel
, lb_packet
;
467 uint32_t udf_rw32_lbmap
, ext_offset
;
469 int rel
, part
, error
, eof
, slot
, flags
;
471 assert(ump
&& icb_loc
&& lb_numres
);
473 vpart
= udf_rw16(icb_loc
->loc
.part_num
);
474 lb_num
= udf_rw32(icb_loc
->loc
.lb_num
);
475 if (vpart
> UDF_VTOP_RAWPART
)
479 part
= ump
->vtop
[vpart
];
480 pdesc
= ump
->partitions
[part
];
482 switch (ump
->vtop_tp
[vpart
]) {
483 case UDF_VTOP_TYPE_RAW
:
484 /* 1:1 to the end of the device */
488 case UDF_VTOP_TYPE_PHYS
:
489 /* transform into its disc logical block */
490 if (lb_num
> udf_rw32(pdesc
->part_len
))
492 *lb_numres
= lb_num
+ udf_rw32(pdesc
->start_loc
);
494 /* extent from here to the end of the partition */
495 *extres
= udf_rw32(pdesc
->part_len
) - lb_num
;
497 case UDF_VTOP_TYPE_VIRT
:
498 /* only maps one logical block, lookup in VAT */
499 if (lb_num
>= ump
->vat_entries
) /* XXX > or >= ? */
502 /* lookup in virtual allocation table file */
503 mutex_enter(&ump
->allocate_mutex
);
504 error
= udf_vat_read(ump
->vat_node
,
505 (uint8_t *) &udf_rw32_lbmap
, 4,
506 ump
->vat_offset
+ lb_num
* 4);
507 mutex_exit(&ump
->allocate_mutex
);
512 lb_num
= udf_rw32(udf_rw32_lbmap
);
514 /* transform into its disc logical block */
515 if (lb_num
> udf_rw32(pdesc
->part_len
))
517 *lb_numres
= lb_num
+ udf_rw32(pdesc
->start_loc
);
519 /* just one logical block */
522 case UDF_VTOP_TYPE_SPARABLE
:
523 /* check if the packet containing the lb_num is remapped */
524 lb_packet
= lb_num
/ ump
->sparable_packet_size
;
525 lb_rel
= lb_num
% ump
->sparable_packet_size
;
527 for (rel
= 0; rel
< udf_rw16(ump
->sparing_table
->rt_l
); rel
++) {
528 sme
= &ump
->sparing_table
->entries
[rel
];
529 if (lb_packet
== udf_rw32(sme
->org
)) {
530 /* NOTE maps to absolute disc logical block! */
531 *lb_numres
= udf_rw32(sme
->map
) + lb_rel
;
532 *extres
= ump
->sparable_packet_size
- lb_rel
;
537 /* transform into its disc logical block */
538 if (lb_num
> udf_rw32(pdesc
->part_len
))
540 *lb_numres
= lb_num
+ udf_rw32(pdesc
->start_loc
);
543 *extres
= ump
->sparable_packet_size
- lb_rel
;
545 case UDF_VTOP_TYPE_META
:
546 /* we have to look into the file's allocation descriptors */
548 /* use metadatafile allocation mutex */
549 lb_size
= udf_rw32(ump
->logical_vol
->lb_size
);
551 UDF_LOCK_NODE(ump
->metadata_node
, 0);
553 /* get first overlapping extent */
557 udf_get_adslot(ump
->metadata_node
,
558 slot
, &s_icb_loc
, &eof
);
559 DPRINTF(ADWLK
, ("slot %d, eof = %d, flags = %d, "
560 "len = %d, lb_num = %d, part = %d\n",
562 UDF_EXT_FLAGS(udf_rw32(s_icb_loc
.len
)),
563 UDF_EXT_LEN(udf_rw32(s_icb_loc
.len
)),
564 udf_rw32(s_icb_loc
.loc
.lb_num
),
565 udf_rw16(s_icb_loc
.loc
.part_num
)));
568 ("Meta partition translation "
569 "failed: can't seek location\n"));
570 UDF_UNLOCK_NODE(ump
->metadata_node
, 0);
573 len
= udf_rw32(s_icb_loc
.len
);
574 flags
= UDF_EXT_FLAGS(len
);
575 len
= UDF_EXT_LEN(len
);
577 if (flags
== UDF_EXT_REDIRECT
) {
582 end_foffset
= foffset
+ len
;
584 if (end_foffset
> lb_num
* lb_size
)
586 foffset
= end_foffset
;
589 /* found overlapping slot */
590 ext_offset
= lb_num
* lb_size
- foffset
;
592 /* process extent offset */
593 lb_num
= udf_rw32(s_icb_loc
.loc
.lb_num
);
594 vpart
= udf_rw16(s_icb_loc
.loc
.part_num
);
595 lb_num
+= (ext_offset
+ lb_size
-1) / lb_size
;
598 UDF_UNLOCK_NODE(ump
->metadata_node
, 0);
599 if (flags
!= UDF_EXT_ALLOCATED
) {
600 DPRINTF(TRANSLATE
, ("Metadata partition translation "
601 "failed: not allocated\n"));
606 * vpart and lb_num are updated, translate again since we
607 * might be mapped on sparable media
609 goto translate_again
;
611 printf("UDF vtop translation scheme %d unimplemented yet\n",
612 ump
->vtop_tp
[vpart
]);
619 /* XXX provisional primitive braindead version */
620 /* TODO use ext_res */
622 udf_translate_vtop_list(struct udf_mount
*ump
, uint32_t sectors
,
623 uint16_t vpart_num
, uint64_t *lmapping
, uint64_t *pmapping
)
626 uint32_t lb_numres
, ext_res
;
629 for (sector
= 0; sector
< sectors
; sector
++) {
630 memset(&loc
, 0, sizeof(struct long_ad
));
631 loc
.loc
.part_num
= udf_rw16(vpart_num
);
632 loc
.loc
.lb_num
= udf_rw32(*lmapping
);
633 udf_translate_vtop(ump
, &loc
, &lb_numres
, &ext_res
);
634 *pmapping
= lb_numres
;
635 lmapping
++; pmapping
++;
640 /* --------------------------------------------------------------------- */
643 * Translate an extent (in logical_blocks) into logical block numbers; used
644 * for read and write operations. DOESNT't check extents.
648 udf_translate_file_extent(struct udf_node
*udf_node
,
649 uint32_t from
, uint32_t num_lb
,
652 struct udf_mount
*ump
;
653 struct icb_tag
*icbtag
;
654 struct long_ad t_ad
, s_ad
;
656 uint64_t foffset
, end_foffset
;
660 uint32_t lb_num
, len
;
661 uint32_t overlap
, translen
;
663 int eof
, error
, flags
;
664 int slot
, addr_type
, icbflags
;
671 UDF_LOCK_NODE(udf_node
, 0);
673 /* initialise derivative vars */
675 lb_size
= udf_rw32(ump
->logical_vol
->lb_size
);
678 icbtag
= &udf_node
->fe
->icbtag
;
680 icbtag
= &udf_node
->efe
->icbtag
;
682 icbflags
= udf_rw16(icbtag
->flags
);
683 addr_type
= icbflags
& UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
686 if (addr_type
== UDF_ICB_INTERN_ALLOC
) {
687 *map
= UDF_TRANS_INTERN
;
688 UDF_UNLOCK_NODE(udf_node
, 0);
692 /* find first overlapping extent */
696 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
697 DPRINTF(ADWLK
, ("slot %d, eof = %d, flags = %d, len = %d, "
698 "lb_num = %d, part = %d\n", slot
, eof
,
699 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)),
700 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
701 udf_rw32(s_ad
.loc
.lb_num
),
702 udf_rw16(s_ad
.loc
.part_num
)));
705 ("Translate file extent "
706 "failed: can't seek location\n"));
707 UDF_UNLOCK_NODE(udf_node
, 0);
710 len
= udf_rw32(s_ad
.len
);
711 flags
= UDF_EXT_FLAGS(len
);
712 len
= UDF_EXT_LEN(len
);
713 lb_num
= udf_rw32(s_ad
.loc
.lb_num
);
715 if (flags
== UDF_EXT_REDIRECT
) {
720 end_foffset
= foffset
+ len
;
722 if (end_foffset
> from
* lb_size
)
724 foffset
= end_foffset
;
727 /* found overlapping slot */
728 ext_offset
= from
* lb_size
- foffset
;
731 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
732 DPRINTF(ADWLK
, ("slot %d, eof = %d, flags = %d, len = %d, "
733 "lb_num = %d, part = %d\n", slot
, eof
,
734 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)),
735 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
736 udf_rw32(s_ad
.loc
.lb_num
),
737 udf_rw16(s_ad
.loc
.part_num
)));
740 ("Translate file extent "
741 "failed: past eof\n"));
742 UDF_UNLOCK_NODE(udf_node
, 0);
746 len
= udf_rw32(s_ad
.len
);
747 flags
= UDF_EXT_FLAGS(len
);
748 len
= UDF_EXT_LEN(len
);
750 lb_num
= udf_rw32(s_ad
.loc
.lb_num
);
751 vpart_num
= udf_rw16(s_ad
.loc
.part_num
);
753 end_foffset
= foffset
+ len
;
755 /* process extent, don't forget to advance on ext_offset! */
756 lb_num
+= (ext_offset
+ lb_size
-1) / lb_size
;
757 overlap
= (len
- ext_offset
+ lb_size
-1) / lb_size
;
761 * note that the while(){} is nessisary for the extent that
762 * the udf_translate_vtop() returns doens't have to span the
766 overlap
= MIN(overlap
, num_lb
);
767 while (overlap
&& (flags
!= UDF_EXT_REDIRECT
)) {
770 case UDF_EXT_ALLOCATED_BUT_NOT_USED
:
771 transsec
= UDF_TRANS_ZERO
;
773 while (overlap
&& num_lb
&& translen
) {
776 overlap
--; num_lb
--; translen
--;
779 case UDF_EXT_ALLOCATED
:
780 t_ad
.loc
.lb_num
= udf_rw32(lb_num
);
781 t_ad
.loc
.part_num
= udf_rw16(vpart_num
);
782 error
= udf_translate_vtop(ump
,
783 &t_ad
, &transsec32
, &translen
);
784 transsec
= transsec32
;
786 UDF_UNLOCK_NODE(udf_node
, 0);
789 while (overlap
&& num_lb
&& translen
) {
791 lb_num
++; transsec
++;
792 overlap
--; num_lb
--; translen
--;
797 ("Translate file extent "
798 "failed: bad flags %x\n", flags
));
799 UDF_UNLOCK_NODE(udf_node
, 0);
806 if (flags
!= UDF_EXT_REDIRECT
)
807 foffset
= end_foffset
;
810 UDF_UNLOCK_NODE(udf_node
, 0);
815 /* --------------------------------------------------------------------- */
818 udf_search_free_vatloc(struct udf_mount
*ump
, uint32_t *lbnumres
)
820 uint32_t lb_size
, lb_num
, lb_map
, udf_rw32_lbmap
;
822 int entry
, chunk
, found
, error
;
825 KASSERT(ump
->logical_vol
);
827 lb_size
= udf_rw32(ump
->logical_vol
->lb_size
);
828 blob
= malloc(lb_size
, M_UDFTEMP
, M_WAITOK
);
830 /* TODO static allocation of search chunk */
832 lb_num
= MIN(ump
->vat_entries
, ump
->vat_last_free_lb
);
837 chunk
= MIN(lb_size
, (ump
->vat_entries
- lb_num
) * 4);
841 error
= udf_vat_read(ump
->vat_node
, blob
, chunk
,
842 ump
->vat_offset
+ lb_num
* 4);
847 /* search this chunk */
848 for (entry
=0; entry
< chunk
/4; entry
++, lb_num
++) {
849 udf_rw32_lbmap
= *((uint32_t *) (blob
+ entry
* 4));
850 lb_map
= udf_rw32(udf_rw32_lbmap
);
851 if (lb_map
== 0xffffffff) {
858 printf("udf_search_free_vatloc: error reading in vat chunk "
859 "(lb %d, size %d)\n", lb_num
, chunk
);
864 DPRINTF(WRITE
, ("udf_search_free_vatloc: extending\n"));
865 lb_num
= ump
->vat_entries
;
869 /* mark entry with initialiser just in case */
870 lb_map
= udf_rw32(0xfffffffe);
871 udf_vat_write(ump
->vat_node
, (uint8_t *) &lb_map
, 4,
872 ump
->vat_offset
+ lb_num
*4);
873 ump
->vat_last_free_lb
= lb_num
;
875 free(blob
, M_UDFTEMP
);
882 udf_bitmap_allocate(struct udf_bitmap
*bitmap
, int ismetadata
,
883 uint32_t *num_lb
, uint64_t *lmappos
)
885 uint32_t offset
, lb_num
, bit
;
891 /* heuristic to keep the two pointers not too close */
892 diff
= bitmap
->data_pos
- bitmap
->metadata_pos
;
893 if ((diff
>= 0) && (diff
< 1024))
894 bitmap
->data_pos
= bitmap
->metadata_pos
+ 1024;
896 offset
= ismetadata
? bitmap
->metadata_pos
: bitmap
->data_pos
;
898 for (pass
= 0; pass
< 2; pass
++) {
899 if (offset
>= bitmap
->max_offset
)
902 while (offset
< bitmap
->max_offset
) {
906 /* use first bit not set */
907 bpos
= bitmap
->bits
+ offset
/8;
908 bit
= ffs(*bpos
); /* returns 0 or 1..8 */
914 /* check for ffs overshoot */
915 if (offset
+ bit
-1 >= bitmap
->max_offset
) {
916 offset
= bitmap
->max_offset
;
920 DPRINTF(PARANOIA
, ("XXX : allocate %d, %p, bit %d\n",
921 offset
+ bit
-1, bpos
, bit
-1));
922 *bpos
&= ~(1 << (bit
-1));
923 lb_num
= offset
+ bit
-1;
925 *num_lb
= *num_lb
- 1;
926 // offset = (offset & ~7);
931 bitmap
->metadata_pos
= offset
;
933 bitmap
->data_pos
= offset
;
939 udf_bitmap_free(struct udf_bitmap
*bitmap
, uint32_t lb_num
, uint32_t num_lb
)
942 uint32_t bit
, bitval
;
948 bpos
= bitmap
->bits
+ offset
/8;
950 while ((bit
!= 0) && (num_lb
> 0)) {
952 KASSERT((*bpos
& bitval
) == 0);
953 DPRINTF(PARANOIA
, ("XXX : free %d, %p, %d\n",
964 bpos
= bitmap
->bits
+ offset
/ 8;
965 while (num_lb
>= 8) {
966 KASSERT((*bpos
== 0));
967 DPRINTF(PARANOIA
, ("XXX : free %d + 8, %p\n", offset
, bpos
));
969 offset
+= 8; num_lb
-= 8;
978 KASSERT((*bpos
& bitval
) == 0);
979 DPRINTF(PARANOIA
, ("XXX : free %d, %p, %d\n",
987 /* --------------------------------------------------------------------- */
990 * We check for overall disc space with a margin to prevent critical
991 * conditions. If disc space is low we try to force a sync() to improve our
992 * estimates. When confronted with meta-data partition size shortage we know
993 * we have to check if it can be extended and we need to extend it when
996 * A 2nd strategy we could use when disc space is getting low on a disc
997 * formatted with a meta-data partition is to see if there are sparse areas in
998 * the meta-data partition and free blocks there for extra data.
1002 udf_do_reserve_space(struct udf_mount
*ump
, struct udf_node
*udf_node
,
1003 uint16_t vpart_num
, uint32_t num_lb
)
1005 ump
->uncommitted_lbs
[vpart_num
] += num_lb
;
1007 udf_node
->uncommitted_lbs
+= num_lb
;
1012 udf_do_unreserve_space(struct udf_mount
*ump
, struct udf_node
*udf_node
,
1013 uint16_t vpart_num
, uint32_t num_lb
)
1015 ump
->uncommitted_lbs
[vpart_num
] -= num_lb
;
1016 if (ump
->uncommitted_lbs
[vpart_num
] < 0) {
1017 DPRINTF(RESERVE
, ("UDF: underflow on partition reservation, "
1018 "part %d: %d\n", vpart_num
,
1019 ump
->uncommitted_lbs
[vpart_num
]));
1020 ump
->uncommitted_lbs
[vpart_num
] = 0;
1023 udf_node
->uncommitted_lbs
-= num_lb
;
1024 if (udf_node
->uncommitted_lbs
< 0) {
1025 DPRINTF(RESERVE
, ("UDF: underflow of node "
1026 "reservation : %d\n",
1027 udf_node
->uncommitted_lbs
));
1028 udf_node
->uncommitted_lbs
= 0;
1035 udf_reserve_space(struct udf_mount
*ump
, struct udf_node
*udf_node
,
1036 int udf_c_type
, uint16_t vpart_num
, uint32_t num_lb
, int can_fail
)
1044 slack
= UDF_DISC_SLACK
;
1047 mutex_enter(&ump
->allocate_mutex
);
1049 /* check if there is enough space available */
1050 for (i
= 0; i
< 16; i
++) { /* XXX arbitrary number */
1051 udf_calc_vpart_freespace(ump
, vpart_num
, &freeblks
);
1052 if (num_lb
+ slack
< freeblks
)
1055 DPRINTF(RESERVE
, ("udf_reserve_space: issuing sync\n"));
1056 mutex_exit(&ump
->allocate_mutex
);
1057 udf_do_sync(ump
, FSCRED
, 0);
1058 mutex_enter(&mntvnode_lock
);
1059 /* 1/4 second wait */
1060 cv_timedwait(&ump
->dirtynodes_cv
, &mntvnode_lock
,
1062 mutex_exit(&mntvnode_lock
);
1063 mutex_enter(&ump
->allocate_mutex
);
1066 /* check if there is enough space available now */
1067 udf_calc_vpart_freespace(ump
, vpart_num
, &freeblks
);
1068 if (num_lb
+ slack
>= freeblks
) {
1069 DPRINTF(RESERVE
, ("udf_reserve_space: try to juggle partitions\n"));
1070 /* TODO juggle with data and metadata partitions if possible */
1073 /* check if there is enough space available now */
1074 udf_calc_vpart_freespace(ump
, vpart_num
, &freeblks
);
1075 if (num_lb
+ slack
<= freeblks
) {
1076 udf_do_reserve_space(ump
, udf_node
, vpart_num
, num_lb
);
1078 DPRINTF(RESERVE
, ("udf_reserve_space: out of disc space\n"));
1082 mutex_exit(&ump
->allocate_mutex
);
1088 udf_cleanup_reservation(struct udf_node
*udf_node
)
1090 struct udf_mount
*ump
= udf_node
->ump
;
1093 mutex_enter(&ump
->allocate_mutex
);
1095 /* compensate for overlapping blocks */
1096 DPRINTF(RESERVE
, ("UDF: overlapped %d blocks in count\n", udf_node
->uncommitted_lbs
));
1098 vpart_num
= udf_get_record_vpart(ump
, udf_get_c_type(udf_node
));
1099 udf_do_unreserve_space(ump
, udf_node
, vpart_num
, udf_node
->uncommitted_lbs
);
1101 DPRINTF(RESERVE
, ("\ttotal now %d\n", ump
->uncommitted_lbs
[vpart_num
]));
1104 if (ump
->uncommitted_lbs
[vpart_num
] < 0)
1105 ump
->uncommitted_lbs
[vpart_num
] = 0;
1107 mutex_exit(&ump
->allocate_mutex
);
1110 /* --------------------------------------------------------------------- */
1113 * Allocate an extent of given length on given virt. partition. It doesn't
1114 * have to be one stretch.
1118 udf_allocate_space(struct udf_mount
*ump
, struct udf_node
*udf_node
,
1119 int udf_c_type
, uint16_t vpart_num
, uint32_t num_lb
, uint64_t *lmapping
)
1121 struct mmc_trackinfo
*alloc_track
, *other_track
;
1122 struct udf_bitmap
*bitmap
;
1123 struct part_desc
*pdesc
;
1124 struct logvol_int_desc
*lvid
;
1126 uint32_t ptov
, lb_num
, *freepos
, free_lbs
;
1127 int lb_size
, alloc_num_lb
;
1128 int alloc_type
, error
;
1131 DPRINTF(CALL
, ("udf_allocate_space(ctype %d, vpart %d, num_lb %d\n",
1132 udf_c_type
, vpart_num
, num_lb
));
1133 mutex_enter(&ump
->allocate_mutex
);
1135 lb_size
= udf_rw32(ump
->logical_vol
->lb_size
);
1136 KASSERT(lb_size
== ump
->discinfo
.sector_size
);
1138 alloc_type
= ump
->vtop_alloc
[vpart_num
];
1139 is_node
= (udf_c_type
== UDF_C_NODE
);
1143 switch (alloc_type
) {
1144 case UDF_ALLOC_VAT
:
1145 /* search empty slot in VAT file */
1146 KASSERT(num_lb
== 1);
1147 error
= udf_search_free_vatloc(ump
, &lb_num
);
1151 /* reserve on the backing sequential partition since
1152 * that partition is credited back later */
1153 udf_do_reserve_space(ump
, udf_node
,
1154 ump
->vtop
[vpart_num
], num_lb
);
1157 case UDF_ALLOC_SEQUENTIAL
:
1158 /* sequential allocation on recordable media */
1159 /* get partition backing up this vpart_num_num */
1160 pdesc
= ump
->partitions
[ump
->vtop
[vpart_num
]];
1162 /* calculate offset from physical base partition */
1163 ptov
= udf_rw32(pdesc
->start_loc
);
1165 /* get our track descriptors */
1166 if (vpart_num
== ump
->node_part
) {
1167 alloc_track
= &ump
->metadata_track
;
1168 other_track
= &ump
->data_track
;
1170 alloc_track
= &ump
->data_track
;
1171 other_track
= &ump
->metadata_track
;
1175 for (lb_num
= 0; lb_num
< num_lb
; lb_num
++) {
1176 *lmappos
++ = alloc_track
->next_writable
- ptov
;
1177 alloc_track
->next_writable
++;
1178 alloc_track
->free_blocks
--;
1181 /* keep other track up-to-date */
1182 if (alloc_track
->tracknr
== other_track
->tracknr
)
1183 memcpy(other_track
, alloc_track
,
1184 sizeof(struct mmc_trackinfo
));
1186 case UDF_ALLOC_SPACEMAP
:
1187 /* try to allocate on unallocated bits */
1188 alloc_num_lb
= num_lb
;
1189 bitmap
= &ump
->part_unalloc_bits
[vpart_num
];
1190 udf_bitmap_allocate(bitmap
, is_node
, &alloc_num_lb
, lmappos
);
1191 ump
->lvclose
|= UDF_WRITE_PART_BITMAPS
;
1193 /* have we allocated all? */
1195 /* TODO convert freed to unalloc and try again */
1196 /* free allocated piece for now */
1198 for (lb_num
=0; lb_num
< num_lb
-alloc_num_lb
; lb_num
++) {
1199 udf_bitmap_free(bitmap
, *lmappos
++, 1);
1204 /* adjust freecount */
1205 lvid
= ump
->logvol_integrity
;
1206 freepos
= &lvid
->tables
[0] + vpart_num
;
1207 free_lbs
= udf_rw32(*freepos
);
1208 *freepos
= udf_rw32(free_lbs
- num_lb
);
1211 case UDF_ALLOC_METABITMAP
: /* UDF 2.50, 2.60 BluRay-RE */
1212 /* allocate on metadata unallocated bits */
1213 alloc_num_lb
= num_lb
;
1214 bitmap
= &ump
->metadata_unalloc_bits
;
1215 udf_bitmap_allocate(bitmap
, is_node
, &alloc_num_lb
, lmappos
);
1216 ump
->lvclose
|= UDF_WRITE_PART_BITMAPS
;
1218 /* have we allocated all? */
1220 /* YIKES! TODO we need to extend the metadata partition */
1221 /* free allocated piece for now */
1223 for (lb_num
=0; lb_num
< num_lb
-alloc_num_lb
; lb_num
++) {
1224 udf_bitmap_free(bitmap
, *lmappos
++, 1);
1229 /* adjust freecount */
1230 lvid
= ump
->logvol_integrity
;
1231 freepos
= &lvid
->tables
[0] + vpart_num
;
1232 free_lbs
= udf_rw32(*freepos
);
1233 *freepos
= udf_rw32(free_lbs
- num_lb
);
1236 case UDF_ALLOC_METASEQUENTIAL
: /* UDF 2.60 BluRay-R */
1237 case UDF_ALLOC_RELAXEDSEQUENTIAL
: /* UDF 2.50/~meta BluRay-R */
1238 printf("ALERT: udf_allocate_space : allocation %d "
1239 "not implemented yet!\n", alloc_type
);
1240 /* TODO implement, doesn't have to be contiguous */
1246 /* credit our partition since we have committed the space */
1247 udf_do_unreserve_space(ump
, udf_node
, vpart_num
, num_lb
);
1251 if (udf_verbose
& UDF_DEBUG_ALLOC
) {
1253 printf("udf_allocate_space, allocated logical lba :\n");
1254 for (lb_num
= 0; lb_num
< num_lb
; lb_num
++) {
1255 printf("%s %"PRIu64
, (lb_num
> 0)?",":"",
1261 mutex_exit(&ump
->allocate_mutex
);
1266 /* --------------------------------------------------------------------- */
1269 udf_free_allocated_space(struct udf_mount
*ump
, uint32_t lb_num
,
1270 uint16_t vpart_num
, uint32_t num_lb
)
1272 struct udf_bitmap
*bitmap
;
1273 struct part_desc
*pdesc
;
1274 struct logvol_int_desc
*lvid
;
1275 uint32_t ptov
, lb_map
, udf_rw32_lbmap
;
1276 uint32_t *freepos
, free_lbs
;
1280 DPRINTF(ALLOC
, ("udf_free_allocated_space: freeing virt lbnum %d "
1281 "part %d + %d sect\n", lb_num
, vpart_num
, num_lb
));
1283 /* no use freeing zero length */
1287 mutex_enter(&ump
->allocate_mutex
);
1289 /* get partition backing up this vpart_num */
1290 pdesc
= ump
->partitions
[ump
->vtop
[vpart_num
]];
1292 switch (ump
->vtop_tp
[vpart_num
]) {
1293 case UDF_VTOP_TYPE_PHYS
:
1294 case UDF_VTOP_TYPE_SPARABLE
:
1295 /* free space to freed or unallocated space bitmap */
1296 ptov
= udf_rw32(pdesc
->start_loc
);
1297 phys_part
= ump
->vtop
[vpart_num
];
1299 /* first try freed space bitmap */
1300 bitmap
= &ump
->part_freed_bits
[phys_part
];
1302 /* if not defined, use unallocated bitmap */
1303 if (bitmap
->bits
== NULL
)
1304 bitmap
= &ump
->part_unalloc_bits
[phys_part
];
1306 /* if no bitmaps are defined, bail out; XXX OK? */
1307 if (bitmap
->bits
== NULL
)
1310 /* free bits if its defined */
1311 KASSERT(bitmap
->bits
);
1312 ump
->lvclose
|= UDF_WRITE_PART_BITMAPS
;
1313 udf_bitmap_free(bitmap
, lb_num
, num_lb
);
1315 /* adjust freecount */
1316 lvid
= ump
->logvol_integrity
;
1317 freepos
= &lvid
->tables
[0] + vpart_num
;
1318 free_lbs
= udf_rw32(*freepos
);
1319 *freepos
= udf_rw32(free_lbs
+ num_lb
);
1321 case UDF_VTOP_TYPE_VIRT
:
1322 /* free this VAT entry */
1323 KASSERT(num_lb
== 1);
1325 lb_map
= 0xffffffff;
1326 udf_rw32_lbmap
= udf_rw32(lb_map
);
1327 error
= udf_vat_write(ump
->vat_node
,
1328 (uint8_t *) &udf_rw32_lbmap
, 4,
1329 ump
->vat_offset
+ lb_num
* 4);
1330 KASSERT(error
== 0);
1331 ump
->vat_last_free_lb
= MIN(ump
->vat_last_free_lb
, lb_num
);
1333 case UDF_VTOP_TYPE_META
:
1334 /* free space in the metadata bitmap */
1335 bitmap
= &ump
->metadata_unalloc_bits
;
1336 KASSERT(bitmap
->bits
);
1338 ump
->lvclose
|= UDF_WRITE_PART_BITMAPS
;
1339 udf_bitmap_free(bitmap
, lb_num
, num_lb
);
1341 /* adjust freecount */
1342 lvid
= ump
->logvol_integrity
;
1343 freepos
= &lvid
->tables
[0] + vpart_num
;
1344 free_lbs
= udf_rw32(*freepos
);
1345 *freepos
= udf_rw32(free_lbs
+ num_lb
);
1348 printf("ALERT: udf_free_allocated_space : allocation %d "
1349 "not implemented yet!\n", ump
->vtop_tp
[vpart_num
]);
1353 mutex_exit(&ump
->allocate_mutex
);
1356 /* --------------------------------------------------------------------- */
1359 * Allocate a buf on disc for direct write out. The space doesn't have to be
1360 * contiguous as the caller takes care of this.
1364 udf_late_allocate_buf(struct udf_mount
*ump
, struct buf
*buf
,
1365 uint64_t *lmapping
, struct long_ad
*node_ad_cpy
, uint16_t *vpart_nump
)
1367 struct udf_node
*udf_node
= VTOI(buf
->b_vp
);
1368 int lb_size
, blks
, udf_c_type
;
1369 int vpart_num
, num_lb
;
1373 * for each sector in the buf, allocate a sector on disc and record
1374 * its position in the provided mapping array.
1376 * If its userdata or FIDs, record its location in its node.
1379 lb_size
= udf_rw32(ump
->logical_vol
->lb_size
);
1380 num_lb
= (buf
->b_bcount
+ lb_size
-1) / lb_size
;
1381 blks
= lb_size
/ DEV_BSIZE
;
1382 udf_c_type
= buf
->b_udf_c_type
;
1384 KASSERT(lb_size
== ump
->discinfo
.sector_size
);
1386 /* select partition to record the buffer on */
1387 vpart_num
= *vpart_nump
= udf_get_record_vpart(ump
, udf_c_type
);
1389 if (udf_c_type
== UDF_C_NODE
) {
1390 /* if not VAT, its allready allocated */
1391 if (ump
->vtop_alloc
[ump
->node_part
] != UDF_ALLOC_VAT
)
1394 /* allocate on its backing sequential partition */
1395 vpart_num
= ump
->data_part
;
1398 /* XXX can this still happen? */
1399 /* do allocation on the selected partition */
1400 error
= udf_allocate_space(ump
, udf_node
, udf_c_type
,
1401 vpart_num
, num_lb
, lmapping
);
1404 * ARGH! we haven't done our accounting right! it should
1407 panic("UDF disc allocation accounting gone wrong");
1410 /* If its userdata or FIDs, record its allocation in its node. */
1411 if ((udf_c_type
== UDF_C_USERDATA
) ||
1412 (udf_c_type
== UDF_C_FIDS
) ||
1413 (udf_c_type
== UDF_C_METADATA_SBM
))
1415 udf_record_allocation_in_node(ump
, buf
, vpart_num
, lmapping
,
1417 /* decrement our outstanding bufs counter */
1419 udf_node
->outstanding_bufs
--;
1424 /* --------------------------------------------------------------------- */
1427 * Try to merge a1 with the new piece a2. udf_ads_merge returns error when not
1428 * possible (anymore); a2 returns the rest piece.
1432 udf_ads_merge(uint32_t lb_size
, struct long_ad
*a1
, struct long_ad
*a2
)
1434 uint32_t max_len
, merge_len
;
1435 uint32_t a1_len
, a2_len
;
1436 uint32_t a1_flags
, a2_flags
;
1437 uint32_t a1_lbnum
, a2_lbnum
;
1438 uint16_t a1_part
, a2_part
;
1440 max_len
= ((UDF_EXT_MAXLEN
/ lb_size
) * lb_size
);
1442 a1_flags
= UDF_EXT_FLAGS(udf_rw32(a1
->len
));
1443 a1_len
= UDF_EXT_LEN(udf_rw32(a1
->len
));
1444 a1_lbnum
= udf_rw32(a1
->loc
.lb_num
);
1445 a1_part
= udf_rw16(a1
->loc
.part_num
);
1447 a2_flags
= UDF_EXT_FLAGS(udf_rw32(a2
->len
));
1448 a2_len
= UDF_EXT_LEN(udf_rw32(a2
->len
));
1449 a2_lbnum
= udf_rw32(a2
->loc
.lb_num
);
1450 a2_part
= udf_rw16(a2
->loc
.part_num
);
1452 /* defines same space */
1453 if (a1_flags
!= a2_flags
)
1456 if (a1_flags
!= UDF_EXT_FREE
) {
1457 /* the same partition */
1458 if (a1_part
!= a2_part
)
1461 /* a2 is successor of a1 */
1462 if (a1_lbnum
* lb_size
+ a1_len
!= a2_lbnum
* lb_size
)
1466 /* merge as most from a2 if possible */
1467 merge_len
= MIN(a2_len
, max_len
- a1_len
);
1468 a1_len
+= merge_len
;
1469 a2_len
-= merge_len
;
1470 a2_lbnum
+= merge_len
/lb_size
;
1472 a1
->len
= udf_rw32(a1_len
| a1_flags
);
1473 a2
->len
= udf_rw32(a2_len
| a2_flags
);
1474 a2
->loc
.lb_num
= udf_rw32(a2_lbnum
);
1479 /* there is space over to merge */
1483 /* --------------------------------------------------------------------- */
1486 udf_wipe_adslots(struct udf_node
*udf_node
)
1488 struct file_entry
*fe
;
1489 struct extfile_entry
*efe
;
1490 struct alloc_ext_entry
*ext
;
1491 uint64_t inflen
, objsize
;
1492 uint32_t lb_size
, dscr_size
, l_ea
, l_ad
, max_l_ad
, crclen
;
1496 lb_size
= udf_rw32(udf_node
->ump
->logical_vol
->lb_size
);
1499 efe
= udf_node
->efe
;
1501 inflen
= udf_rw64(fe
->inf_len
);
1503 dscr_size
= sizeof(struct file_entry
) -1;
1504 l_ea
= udf_rw32(fe
->l_ea
);
1505 l_ad
= udf_rw32(fe
->l_ad
);
1506 data_pos
= (uint8_t *) fe
+ dscr_size
+ l_ea
;
1508 inflen
= udf_rw64(efe
->inf_len
);
1509 objsize
= udf_rw64(efe
->obj_size
);
1510 dscr_size
= sizeof(struct extfile_entry
) -1;
1511 l_ea
= udf_rw32(efe
->l_ea
);
1512 l_ad
= udf_rw32(efe
->l_ad
);
1513 data_pos
= (uint8_t *) efe
+ dscr_size
+ l_ea
;
1515 max_l_ad
= lb_size
- dscr_size
- l_ea
;
1518 memset(data_pos
, 0, max_l_ad
);
1519 crclen
= dscr_size
- UDF_DESC_TAG_LENGTH
+ l_ea
;
1521 fe
->l_ad
= udf_rw32(0);
1522 fe
->logblks_rec
= udf_rw64(0);
1523 fe
->tag
.desc_crc_len
= udf_rw16(crclen
);
1525 efe
->l_ad
= udf_rw32(0);
1526 efe
->logblks_rec
= udf_rw64(0);
1527 efe
->tag
.desc_crc_len
= udf_rw16(crclen
);
1530 /* wipe all allocation extent entries */
1531 for (extnr
= 0; extnr
< udf_node
->num_extensions
; extnr
++) {
1532 ext
= udf_node
->ext
[extnr
];
1533 dscr_size
= sizeof(struct alloc_ext_entry
) -1;
1534 data_pos
= (uint8_t *) ext
->data
;
1535 max_l_ad
= lb_size
- dscr_size
;
1536 memset(data_pos
, 0, max_l_ad
);
1537 ext
->l_ad
= udf_rw32(0);
1539 crclen
= dscr_size
- UDF_DESC_TAG_LENGTH
;
1540 ext
->tag
.desc_crc_len
= udf_rw16(crclen
);
1542 udf_node
->i_flags
|= IN_NODE_REBUILD
;
1545 /* --------------------------------------------------------------------- */
1548 udf_get_adslot(struct udf_node
*udf_node
, int slot
, struct long_ad
*icb
,
1550 struct file_entry
*fe
;
1551 struct extfile_entry
*efe
;
1552 struct alloc_ext_entry
*ext
;
1553 struct icb_tag
*icbtag
;
1554 struct short_ad
*short_ad
;
1555 struct long_ad
*long_ad
, l_icb
;
1557 uint32_t lb_size
, dscr_size
, l_ea
, l_ad
, flags
;
1559 int icbflags
, addr_type
, adlen
, extnr
;
1561 /* determine what descriptor we are in */
1562 lb_size
= udf_rw32(udf_node
->ump
->logical_vol
->lb_size
);
1565 efe
= udf_node
->efe
;
1567 icbtag
= &fe
->icbtag
;
1568 dscr_size
= sizeof(struct file_entry
) -1;
1569 l_ea
= udf_rw32(fe
->l_ea
);
1570 l_ad
= udf_rw32(fe
->l_ad
);
1571 data_pos
= (uint8_t *) fe
+ dscr_size
+ l_ea
;
1573 icbtag
= &efe
->icbtag
;
1574 dscr_size
= sizeof(struct extfile_entry
) -1;
1575 l_ea
= udf_rw32(efe
->l_ea
);
1576 l_ad
= udf_rw32(efe
->l_ad
);
1577 data_pos
= (uint8_t *) efe
+ dscr_size
+ l_ea
;
1580 icbflags
= udf_rw16(icbtag
->flags
);
1581 addr_type
= icbflags
& UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
1583 /* just in case we're called on an intern, its EOF */
1584 if (addr_type
== UDF_ICB_INTERN_ALLOC
) {
1585 memset(icb
, 0, sizeof(struct long_ad
));
1591 if (addr_type
== UDF_ICB_SHORT_ALLOC
) {
1592 adlen
= sizeof(struct short_ad
);
1593 } else if (addr_type
== UDF_ICB_LONG_ALLOC
) {
1594 adlen
= sizeof(struct long_ad
);
1597 /* if offset too big, we go to the allocation extensions */
1598 offset
= slot
* adlen
;
1600 while (offset
>= l_ad
) {
1601 /* check if our last entry is a redirect */
1602 if (addr_type
== UDF_ICB_SHORT_ALLOC
) {
1603 short_ad
= (struct short_ad
*) (data_pos
+ l_ad
-adlen
);
1604 l_icb
.len
= short_ad
->len
;
1605 l_icb
.loc
.part_num
= udf_node
->loc
.loc
.part_num
;
1606 l_icb
.loc
.lb_num
= short_ad
->lb_num
;
1608 KASSERT(addr_type
== UDF_ICB_LONG_ALLOC
);
1609 long_ad
= (struct long_ad
*) (data_pos
+ l_ad
-adlen
);
1612 flags
= UDF_EXT_FLAGS(udf_rw32(l_icb
.len
));
1613 if (flags
!= UDF_EXT_REDIRECT
) {
1614 l_ad
= 0; /* force EOF */
1618 /* advance to next extent */
1620 if (extnr
>= udf_node
->num_extensions
) {
1621 l_ad
= 0; /* force EOF */
1624 offset
= offset
- l_ad
;
1625 ext
= udf_node
->ext
[extnr
];
1626 dscr_size
= sizeof(struct alloc_ext_entry
) -1;
1627 l_ad
= udf_rw32(ext
->l_ad
);
1628 data_pos
= (uint8_t *) ext
+ dscr_size
;
1631 /* XXX l_ad == 0 should be enough to check */
1632 *eof
= (offset
>= l_ad
) || (l_ad
== 0);
1634 DPRINTF(PARANOIDADWLK
, ("returning EOF, extnr %d, offset %d, "
1635 "l_ad %d\n", extnr
, offset
, l_ad
));
1636 memset(icb
, 0, sizeof(struct long_ad
));
1640 /* get the element */
1641 if (addr_type
== UDF_ICB_SHORT_ALLOC
) {
1642 short_ad
= (struct short_ad
*) (data_pos
+ offset
);
1643 icb
->len
= short_ad
->len
;
1644 icb
->loc
.part_num
= udf_node
->loc
.loc
.part_num
;
1645 icb
->loc
.lb_num
= short_ad
->lb_num
;
1646 } else if (addr_type
== UDF_ICB_LONG_ALLOC
) {
1647 long_ad
= (struct long_ad
*) (data_pos
+ offset
);
1650 DPRINTF(PARANOIDADWLK
, ("returning element : v %d, lb %d, len %d, "
1651 "flags %d\n", icb
->loc
.part_num
, icb
->loc
.lb_num
,
1652 UDF_EXT_LEN(icb
->len
), UDF_EXT_FLAGS(icb
->len
)));
1655 /* --------------------------------------------------------------------- */
1658 udf_append_adslot(struct udf_node
*udf_node
, int *slot
, struct long_ad
*icb
) {
1659 struct udf_mount
*ump
= udf_node
->ump
;
1660 union dscrptr
*dscr
, *extdscr
;
1661 struct file_entry
*fe
;
1662 struct extfile_entry
*efe
;
1663 struct alloc_ext_entry
*ext
;
1664 struct icb_tag
*icbtag
;
1665 struct short_ad
*short_ad
;
1666 struct long_ad
*long_ad
, o_icb
, l_icb
;
1667 uint64_t logblks_rec
, *logblks_rec_p
;
1669 uint32_t offset
, rest
, len
, lb_num
;
1670 uint32_t lb_size
, dscr_size
, l_ea
, l_ad
, *l_ad_p
, max_l_ad
, crclen
;
1674 int icbflags
, addr_type
, adlen
, extnr
;
1677 lb_size
= udf_rw32(ump
->logical_vol
->lb_size
);
1678 vpart_num
= udf_rw16(udf_node
->loc
.loc
.part_num
);
1680 /* determine what descriptor we are in */
1682 efe
= udf_node
->efe
;
1684 icbtag
= &fe
->icbtag
;
1685 dscr
= (union dscrptr
*) fe
;
1686 dscr_size
= sizeof(struct file_entry
) -1;
1688 l_ea
= udf_rw32(fe
->l_ea
);
1690 logblks_rec_p
= &fe
->logblks_rec
;
1692 icbtag
= &efe
->icbtag
;
1693 dscr
= (union dscrptr
*) efe
;
1694 dscr_size
= sizeof(struct extfile_entry
) -1;
1696 l_ea
= udf_rw32(efe
->l_ea
);
1697 l_ad_p
= &efe
->l_ad
;
1698 logblks_rec_p
= &efe
->logblks_rec
;
1700 data_pos
= (uint8_t *) dscr
+ dscr_size
+ l_ea
;
1701 max_l_ad
= lb_size
- dscr_size
- l_ea
;
1703 icbflags
= udf_rw16(icbtag
->flags
);
1704 addr_type
= icbflags
& UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
1706 /* just in case we're called on an intern, its EOF */
1707 if (addr_type
== UDF_ICB_INTERN_ALLOC
) {
1708 panic("udf_append_adslot on UDF_ICB_INTERN_ALLOC\n");
1712 if (addr_type
== UDF_ICB_SHORT_ALLOC
) {
1713 adlen
= sizeof(struct short_ad
);
1714 } else if (addr_type
== UDF_ICB_LONG_ALLOC
) {
1715 adlen
= sizeof(struct long_ad
);
1718 /* clean up given long_ad since it can be a synthesized one */
1719 flags
= UDF_EXT_FLAGS(udf_rw32(icb
->len
));
1720 if (flags
== UDF_EXT_FREE
) {
1721 icb
->loc
.part_num
= udf_rw16(0);
1722 icb
->loc
.lb_num
= udf_rw32(0);
1725 /* if offset too big, we go to the allocation extensions */
1726 l_ad
= udf_rw32(*l_ad_p
);
1727 offset
= (*slot
) * adlen
;
1729 while (offset
>= l_ad
) {
1730 /* check if our last entry is a redirect */
1731 if (addr_type
== UDF_ICB_SHORT_ALLOC
) {
1732 short_ad
= (struct short_ad
*) (data_pos
+ l_ad
-adlen
);
1733 l_icb
.len
= short_ad
->len
;
1734 l_icb
.loc
.part_num
= udf_node
->loc
.loc
.part_num
;
1735 l_icb
.loc
.lb_num
= short_ad
->lb_num
;
1737 KASSERT(addr_type
== UDF_ICB_LONG_ALLOC
);
1738 long_ad
= (struct long_ad
*) (data_pos
+ l_ad
-adlen
);
1741 flags
= UDF_EXT_FLAGS(udf_rw32(l_icb
.len
));
1742 if (flags
!= UDF_EXT_REDIRECT
) {
1743 /* only one past the last one is adressable */
1747 /* advance to next extent */
1749 KASSERT(extnr
< udf_node
->num_extensions
);
1750 offset
= offset
- l_ad
;
1752 ext
= udf_node
->ext
[extnr
];
1753 dscr
= (union dscrptr
*) ext
;
1754 dscr_size
= sizeof(struct alloc_ext_entry
) -1;
1755 max_l_ad
= lb_size
- dscr_size
;
1756 l_ad_p
= &ext
->l_ad
;
1757 l_ad
= udf_rw32(*l_ad_p
);
1758 data_pos
= (uint8_t *) ext
+ dscr_size
;
1760 DPRINTF(PARANOIDADWLK
, ("append, ext %d, offset %d, l_ad %d\n",
1761 extnr
, offset
, udf_rw32(*l_ad_p
)));
1762 KASSERT(l_ad
== udf_rw32(*l_ad_p
));
1764 /* offset is offset within the current (E)FE/AED */
1765 l_ad
= udf_rw32(*l_ad_p
);
1766 crclen
= udf_rw16(dscr
->tag
.desc_crc_len
);
1767 logblks_rec
= udf_rw64(*logblks_rec_p
);
1769 /* overwriting old piece? */
1770 if (offset
< l_ad
) {
1771 /* overwrite entry; compensate for the old element */
1772 if (addr_type
== UDF_ICB_SHORT_ALLOC
) {
1773 short_ad
= (struct short_ad
*) (data_pos
+ offset
);
1774 o_icb
.len
= short_ad
->len
;
1775 o_icb
.loc
.part_num
= udf_rw16(0); /* ignore */
1776 o_icb
.loc
.lb_num
= short_ad
->lb_num
;
1777 } else if (addr_type
== UDF_ICB_LONG_ALLOC
) {
1778 long_ad
= (struct long_ad
*) (data_pos
+ offset
);
1781 panic("Invalid address type in udf_append_adslot\n");
1784 len
= udf_rw32(o_icb
.len
);
1785 if (UDF_EXT_FLAGS(len
) == UDF_EXT_ALLOCATED
) {
1787 len
= UDF_EXT_LEN(len
);
1788 logblks_rec
-= (len
+ lb_size
-1) / lb_size
;
1792 /* check if we're not appending a redirection */
1793 flags
= UDF_EXT_FLAGS(udf_rw32(icb
->len
));
1794 KASSERT(flags
!= UDF_EXT_REDIRECT
);
1796 /* round down available space */
1797 rest
= adlen
* ((max_l_ad
- offset
) / adlen
);
1798 if (rest
<= adlen
) {
1799 /* have to append aed, see if we already have a spare one */
1801 ext
= udf_node
->ext
[extnr
];
1802 l_icb
= udf_node
->ext_loc
[extnr
];
1804 DPRINTF(ALLOC
,("adding allocation extent %d\n", extnr
));
1806 error
= udf_reserve_space(ump
, NULL
, UDF_C_NODE
,
1807 vpart_num
, 1, /* can fail */ false);
1809 printf("UDF: couldn't reserve space for AED!\n");
1812 error
= udf_allocate_space(ump
, NULL
, UDF_C_NODE
,
1813 vpart_num
, 1, &lmapping
);
1816 panic("UDF: couldn't allocate AED!\n");
1818 /* initialise pointer to location */
1819 memset(&l_icb
, 0, sizeof(struct long_ad
));
1820 l_icb
.len
= udf_rw32(lb_size
| UDF_EXT_REDIRECT
);
1821 l_icb
.loc
.lb_num
= udf_rw32(lb_num
);
1822 l_icb
.loc
.part_num
= udf_rw16(vpart_num
);
1824 /* create new aed descriptor */
1825 udf_create_logvol_dscr(ump
, udf_node
, &l_icb
, &extdscr
);
1826 ext
= &extdscr
->aee
;
1828 udf_inittag(ump
, &ext
->tag
, TAGID_ALLOCEXTENT
, lb_num
);
1829 dscr_size
= sizeof(struct alloc_ext_entry
) -1;
1830 max_l_ad
= lb_size
- dscr_size
;
1831 memset(ext
->data
, 0, max_l_ad
);
1832 ext
->l_ad
= udf_rw32(0);
1833 ext
->tag
.desc_crc_len
=
1834 udf_rw16(dscr_size
- UDF_DESC_TAG_LENGTH
);
1837 udf_node
->num_extensions
++;
1838 udf_node
->ext_loc
[extnr
] = l_icb
;
1839 udf_node
->ext
[extnr
] = ext
;
1841 /* add redirect and adjust l_ad and crclen for old descr */
1842 if (addr_type
== UDF_ICB_SHORT_ALLOC
) {
1843 short_ad
= (struct short_ad
*) (data_pos
+ offset
);
1844 short_ad
->len
= l_icb
.len
;
1845 short_ad
->lb_num
= l_icb
.loc
.lb_num
;
1846 } else if (addr_type
== UDF_ICB_LONG_ALLOC
) {
1847 long_ad
= (struct long_ad
*) (data_pos
+ offset
);
1852 dscr
->tag
.desc_crc_len
= udf_rw16(crclen
);
1853 *l_ad_p
= udf_rw32(l_ad
);
1855 /* advance to the new extension */
1856 KASSERT(ext
!= NULL
);
1857 dscr
= (union dscrptr
*) ext
;
1858 dscr_size
= sizeof(struct alloc_ext_entry
) -1;
1859 max_l_ad
= lb_size
- dscr_size
;
1860 data_pos
= (uint8_t *) dscr
+ dscr_size
;
1862 l_ad_p
= &ext
->l_ad
;
1863 l_ad
= udf_rw32(*l_ad_p
);
1864 crclen
= udf_rw16(dscr
->tag
.desc_crc_len
);
1867 /* adjust callees slot count for link insert */
1871 /* write out the element */
1872 DPRINTF(PARANOIDADWLK
, ("adding element : %p : v %d, lb %d, "
1873 "len %d, flags %d\n", data_pos
+ offset
,
1874 icb
->loc
.part_num
, icb
->loc
.lb_num
,
1875 UDF_EXT_LEN(icb
->len
), UDF_EXT_FLAGS(icb
->len
)));
1876 if (addr_type
== UDF_ICB_SHORT_ALLOC
) {
1877 short_ad
= (struct short_ad
*) (data_pos
+ offset
);
1878 short_ad
->len
= icb
->len
;
1879 short_ad
->lb_num
= icb
->loc
.lb_num
;
1880 } else if (addr_type
== UDF_ICB_LONG_ALLOC
) {
1881 long_ad
= (struct long_ad
*) (data_pos
+ offset
);
1885 /* adjust logblks recorded count */
1886 len
= udf_rw32(icb
->len
);
1887 flags
= UDF_EXT_FLAGS(len
);
1888 if (flags
== UDF_EXT_ALLOCATED
)
1889 logblks_rec
+= (UDF_EXT_LEN(len
) + lb_size
-1) / lb_size
;
1890 *logblks_rec_p
= udf_rw64(logblks_rec
);
1892 /* adjust l_ad and crclen when needed */
1893 if (offset
>= l_ad
) {
1896 dscr
->tag
.desc_crc_len
= udf_rw16(crclen
);
1897 *l_ad_p
= udf_rw32(l_ad
);
1903 /* --------------------------------------------------------------------- */
1906 udf_count_alloc_exts(struct udf_node
*udf_node
)
1908 struct long_ad s_ad
;
1909 uint32_t lb_num
, len
, flags
;
1912 int num_extents
, extnr
;
1915 if (udf_node
->num_extensions
== 0)
1918 lb_size
= udf_rw32(udf_node
->ump
->logical_vol
->lb_size
);
1919 /* count number of allocation extents in use */
1923 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
1926 len
= udf_rw32(s_ad
.len
);
1927 flags
= UDF_EXT_FLAGS(len
);
1929 if (flags
== UDF_EXT_REDIRECT
)
1935 DPRINTF(ALLOC
, ("udf_count_alloc_ext counted %d live extents\n",
1938 /* XXX choice: we could delay freeing them on node writeout */
1939 /* free excess entries */
1940 extnr
= num_extents
;
1941 for (;extnr
< udf_node
->num_extensions
; extnr
++) {
1942 DPRINTF(ALLOC
, ("freeing alloc ext %d\n", extnr
));
1943 /* free dscriptor */
1944 s_ad
= udf_node
->ext_loc
[extnr
];
1945 udf_free_logvol_dscr(udf_node
->ump
, &s_ad
,
1946 udf_node
->ext
[extnr
]);
1947 udf_node
->ext
[extnr
] = NULL
;
1949 /* free disc space */
1950 lb_num
= udf_rw32(s_ad
.loc
.lb_num
);
1951 vpart_num
= udf_rw16(s_ad
.loc
.part_num
);
1952 udf_free_allocated_space(udf_node
->ump
, lb_num
, vpart_num
, 1);
1954 memset(&udf_node
->ext_loc
[extnr
], 0, sizeof(struct long_ad
));
1957 /* set our new number of allocation extents */
1958 udf_node
->num_extensions
= num_extents
;
1962 /* --------------------------------------------------------------------- */
1965 * Adjust the node's allocation descriptors to reflect the new mapping; do
1966 * take note that we might glue to existing allocation descriptors.
1968 * XXX Note there can only be one allocation being recorded/mount; maybe
1969 * explicit allocation in shedule thread?
1973 udf_record_allocation_in_node(struct udf_mount
*ump
, struct buf
*buf
,
1974 uint16_t vpart_num
, uint64_t *mapping
, struct long_ad
*node_ad_cpy
)
1976 struct vnode
*vp
= buf
->b_vp
;
1977 struct udf_node
*udf_node
= VTOI(vp
);
1978 struct file_entry
*fe
;
1979 struct extfile_entry
*efe
;
1980 struct icb_tag
*icbtag
;
1981 struct long_ad s_ad
, c_ad
;
1982 uint64_t inflen
, from
, till
;
1983 uint64_t foffset
, end_foffset
, restart_foffset
;
1984 uint64_t orig_inflen
, orig_lbrec
, new_inflen
, new_lbrec
;
1985 uint32_t num_lb
, len
, flags
, lb_num
;
1987 uint32_t slot_offset
, replace_len
, replace
;
1988 int addr_type
, icbflags
;
1989 // int udf_c_type = buf->b_udf_c_type;
1990 int lb_size
, run_length
, eof
;
1991 int slot
, cpy_slot
, cpy_slots
, restart_slot
;
1994 DPRINTF(ALLOC
, ("udf_record_allocation_in_node\n"));
1997 /* XXX disable sanity check for now */
1998 /* sanity check ... should be panic ? */
1999 if ((udf_c_type
!= UDF_C_USERDATA
) && (udf_c_type
!= UDF_C_FIDS
))
2003 lb_size
= udf_rw32(udf_node
->ump
->logical_vol
->lb_size
);
2006 UDF_LOCK_NODE(udf_node
, 0); /* XXX can deadlock ? */
2007 udf_node_sanity_check(udf_node
, &orig_inflen
, &orig_lbrec
);
2010 efe
= udf_node
->efe
;
2012 icbtag
= &fe
->icbtag
;
2013 inflen
= udf_rw64(fe
->inf_len
);
2015 icbtag
= &efe
->icbtag
;
2016 inflen
= udf_rw64(efe
->inf_len
);
2019 /* do check if `till' is not past file information length */
2020 from
= buf
->b_lblkno
* lb_size
;
2021 till
= MIN(inflen
, from
+ buf
->b_resid
);
2023 num_lb
= (till
- from
+ lb_size
-1) / lb_size
;
2025 DPRINTF(ALLOC
, ("record allocation from %"PRIu64
" + %d\n", from
, buf
->b_bcount
));
2027 icbflags
= udf_rw16(icbtag
->flags
);
2028 addr_type
= icbflags
& UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
2030 if (addr_type
== UDF_ICB_INTERN_ALLOC
) {
2032 /* XXX clean up rest of node? just in case? */
2033 UDF_UNLOCK_NODE(udf_node
, 0);
2041 /* 1) copy till first overlap piece to the rewrite buffer */
2043 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
2046 ("Record allocation in node "
2047 "failed: encountered EOF\n"));
2048 UDF_UNLOCK_NODE(udf_node
, 0);
2049 buf
->b_error
= EINVAL
;
2052 len
= udf_rw32(s_ad
.len
);
2053 flags
= UDF_EXT_FLAGS(len
);
2054 len
= UDF_EXT_LEN(len
);
2056 if (flags
== UDF_EXT_REDIRECT
) {
2061 end_foffset
= foffset
+ len
;
2062 if (end_foffset
> from
)
2065 node_ad_cpy
[cpy_slot
++] = s_ad
;
2067 DPRINTF(ALLOC
, ("\t1: vp %d, lb %d, len %d, flags %d "
2069 udf_rw16(s_ad
.loc
.part_num
),
2070 udf_rw32(s_ad
.loc
.lb_num
),
2071 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2072 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2074 foffset
= end_foffset
;
2077 restart_slot
= slot
;
2078 restart_foffset
= foffset
;
2080 /* 2) trunc overlapping slot at overlap and copy it */
2081 slot_offset
= from
- foffset
;
2082 if (slot_offset
> 0) {
2083 DPRINTF(ALLOC
, ("\tslot_offset = %d, flags = %d (%d)\n",
2084 slot_offset
, flags
>> 30, flags
));
2086 s_ad
.len
= udf_rw32(slot_offset
| flags
);
2087 node_ad_cpy
[cpy_slot
++] = s_ad
;
2089 DPRINTF(ALLOC
, ("\t2: vp %d, lb %d, len %d, flags %d "
2091 udf_rw16(s_ad
.loc
.part_num
),
2092 udf_rw32(s_ad
.loc
.lb_num
),
2093 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2094 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2096 foffset
+= slot_offset
;
2098 /* 3) insert new mappings */
2099 memset(&s_ad
, 0, sizeof(struct long_ad
));
2101 for (lb_num
= 0; lb_num
< num_lb
; lb_num
++) {
2102 run_start
= mapping
[lb_num
];
2104 while (lb_num
< num_lb
-1) {
2105 if (mapping
[lb_num
+1] != mapping
[lb_num
]+1)
2106 if (mapping
[lb_num
+1] != mapping
[lb_num
])
2111 /* insert slot for this mapping */
2112 len
= run_length
* lb_size
;
2114 /* bounds checking */
2115 if (foffset
+ len
> till
)
2116 len
= till
- foffset
;
2117 KASSERT(foffset
+ len
<= inflen
);
2119 s_ad
.len
= udf_rw32(len
| UDF_EXT_ALLOCATED
);
2120 s_ad
.loc
.part_num
= udf_rw16(vpart_num
);
2121 s_ad
.loc
.lb_num
= udf_rw32(run_start
);
2128 ("Record allocation in node "
2129 "failed: insert failed\n"));
2130 UDF_UNLOCK_NODE(udf_node
, 0);
2131 buf
->b_error
= EINVAL
;
2134 node_ad_cpy
[cpy_slot
++] = s_ad
;
2136 DPRINTF(ALLOC
, ("\t3: insert new mapping vp %d lb %d, len %d, "
2137 "flags %d -> stack\n",
2138 udf_rw16(s_ad
.loc
.part_num
), udf_rw32(s_ad
.loc
.lb_num
),
2139 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2140 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2143 /* 4) pop replaced length */
2144 slot
= restart_slot
;
2145 foffset
= restart_foffset
;
2147 replace_len
= till
- foffset
; /* total amount of bytes to pop */
2148 slot_offset
= from
- foffset
; /* offset in first encounted slot */
2149 KASSERT((slot_offset
% lb_size
) == 0);
2152 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
2156 len
= udf_rw32(s_ad
.len
);
2157 flags
= UDF_EXT_FLAGS(len
);
2158 len
= UDF_EXT_LEN(len
);
2159 lb_num
= udf_rw32(s_ad
.loc
.lb_num
);
2161 if (flags
== UDF_EXT_REDIRECT
) {
2166 DPRINTF(ALLOC
, ("\t4i: got slot %d, slot_offset %d, "
2168 "vp %d, lb %d, len %d, flags %d\n",
2169 slot
, slot_offset
, replace_len
,
2170 udf_rw16(s_ad
.loc
.part_num
),
2171 udf_rw32(s_ad
.loc
.lb_num
),
2172 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2173 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2175 /* adjust for slot offset */
2177 DPRINTF(ALLOC
, ("\t4s: skipping %d\n", slot_offset
));
2178 lb_num
+= slot_offset
/ lb_size
;
2180 foffset
+= slot_offset
;
2181 replace_len
-= slot_offset
;
2187 /* advance for (the rest of) this slot */
2188 replace
= MIN(len
, replace_len
);
2189 DPRINTF(ALLOC
, ("\t4d: replacing %d\n", replace
));
2191 /* advance for this slot */
2193 /* note: dont round DOWN on num_lb since we then
2194 * forget the last partial one */
2195 num_lb
= (replace
+ lb_size
- 1) / lb_size
;
2196 if (flags
!= UDF_EXT_FREE
) {
2197 udf_free_allocated_space(ump
, lb_num
,
2198 udf_rw16(s_ad
.loc
.part_num
), num_lb
);
2203 replace_len
-= replace
;
2206 /* do we have a slot tail ? */
2208 KASSERT(foffset
% lb_size
== 0);
2210 /* we arrived at our point, push remainder */
2211 s_ad
.len
= udf_rw32(len
| flags
);
2212 s_ad
.loc
.lb_num
= udf_rw32(lb_num
);
2213 if (flags
== UDF_EXT_FREE
)
2214 s_ad
.loc
.lb_num
= udf_rw32(0);
2215 node_ad_cpy
[cpy_slot
++] = s_ad
;
2219 DPRINTF(ALLOC
, ("\t4: vp %d, lb %d, len %d, flags %d "
2221 udf_rw16(s_ad
.loc
.part_num
),
2222 udf_rw32(s_ad
.loc
.lb_num
),
2223 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2224 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2231 /* 5) copy remainder */
2233 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
2237 len
= udf_rw32(s_ad
.len
);
2238 flags
= UDF_EXT_FLAGS(len
);
2239 len
= UDF_EXT_LEN(len
);
2241 if (flags
== UDF_EXT_REDIRECT
) {
2246 node_ad_cpy
[cpy_slot
++] = s_ad
;
2248 DPRINTF(ALLOC
, ("\t5: insert new mapping "
2249 "vp %d lb %d, len %d, flags %d "
2251 udf_rw16(s_ad
.loc
.part_num
),
2252 udf_rw32(s_ad
.loc
.lb_num
),
2253 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2254 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2259 /* 6) reset node descriptors */
2260 udf_wipe_adslots(udf_node
);
2262 /* 7) copy back extents; merge when possible. Recounting on the fly */
2263 cpy_slots
= cpy_slot
;
2265 c_ad
= node_ad_cpy
[0];
2267 DPRINTF(ALLOC
, ("\t7s: stack -> got mapping vp %d "
2268 "lb %d, len %d, flags %d\n",
2269 udf_rw16(c_ad
.loc
.part_num
),
2270 udf_rw32(c_ad
.loc
.lb_num
),
2271 UDF_EXT_LEN(udf_rw32(c_ad
.len
)),
2272 UDF_EXT_FLAGS(udf_rw32(c_ad
.len
)) >> 30));
2274 for (cpy_slot
= 1; cpy_slot
< cpy_slots
; cpy_slot
++) {
2275 s_ad
= node_ad_cpy
[cpy_slot
];
2277 DPRINTF(ALLOC
, ("\t7i: stack -> got mapping vp %d "
2278 "lb %d, len %d, flags %d\n",
2279 udf_rw16(s_ad
.loc
.part_num
),
2280 udf_rw32(s_ad
.loc
.lb_num
),
2281 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2282 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2284 /* see if we can merge */
2285 if (udf_ads_merge(lb_size
, &c_ad
, &s_ad
)) {
2286 /* not mergable (anymore) */
2287 DPRINTF(ALLOC
, ("\t7: appending vp %d lb %d, "
2288 "len %d, flags %d\n",
2289 udf_rw16(c_ad
.loc
.part_num
),
2290 udf_rw32(c_ad
.loc
.lb_num
),
2291 UDF_EXT_LEN(udf_rw32(c_ad
.len
)),
2292 UDF_EXT_FLAGS(udf_rw32(c_ad
.len
)) >> 30));
2294 error
= udf_append_adslot(udf_node
, &slot
, &c_ad
);
2296 buf
->b_error
= error
;
2304 /* 8) push rest slot (if any) */
2305 if (UDF_EXT_LEN(c_ad
.len
) > 0) {
2306 DPRINTF(ALLOC
, ("\t8: last append vp %d lb %d, "
2307 "len %d, flags %d\n",
2308 udf_rw16(c_ad
.loc
.part_num
),
2309 udf_rw32(c_ad
.loc
.lb_num
),
2310 UDF_EXT_LEN(udf_rw32(c_ad
.len
)),
2311 UDF_EXT_FLAGS(udf_rw32(c_ad
.len
)) >> 30));
2313 error
= udf_append_adslot(udf_node
, &slot
, &c_ad
);
2315 buf
->b_error
= error
;
2321 udf_count_alloc_exts(udf_node
);
2323 /* the node's descriptors should now be sane */
2324 udf_node_sanity_check(udf_node
, &new_inflen
, &new_lbrec
);
2325 UDF_UNLOCK_NODE(udf_node
, 0);
2327 KASSERT(orig_inflen
== new_inflen
);
2328 KASSERT(new_lbrec
>= orig_lbrec
);
2333 /* --------------------------------------------------------------------- */
2336 udf_grow_node(struct udf_node
*udf_node
, uint64_t new_size
)
2338 union dscrptr
*dscr
;
2339 struct vnode
*vp
= udf_node
->vnode
;
2340 struct udf_mount
*ump
= udf_node
->ump
;
2341 struct file_entry
*fe
;
2342 struct extfile_entry
*efe
;
2343 struct icb_tag
*icbtag
;
2344 struct long_ad c_ad
, s_ad
;
2345 uint64_t size_diff
, old_size
, inflen
, objsize
, chunk
, append_len
;
2346 uint64_t foffset
, end_foffset
;
2347 uint64_t orig_inflen
, orig_lbrec
, new_inflen
, new_lbrec
;
2348 uint32_t lb_size
, dscr_size
, crclen
, lastblock_grow
;
2349 uint32_t icbflags
, len
, flags
, max_len
;
2350 uint32_t max_l_ad
, l_ad
, l_ea
;
2351 uint16_t my_part
, dst_part
;
2352 uint8_t *data_pos
, *evacuated_data
;
2357 DPRINTF(ALLOC
, ("udf_grow_node\n"));
2359 UDF_LOCK_NODE(udf_node
, 0);
2360 udf_node_sanity_check(udf_node
, &orig_inflen
, &orig_lbrec
);
2362 lb_size
= udf_rw32(ump
->logical_vol
->lb_size
);
2363 max_len
= ((UDF_EXT_MAXLEN
/ lb_size
) * lb_size
);
2366 efe
= udf_node
->efe
;
2368 dscr
= (union dscrptr
*) fe
;
2369 icbtag
= &fe
->icbtag
;
2370 inflen
= udf_rw64(fe
->inf_len
);
2372 dscr_size
= sizeof(struct file_entry
) -1;
2373 l_ea
= udf_rw32(fe
->l_ea
);
2374 l_ad
= udf_rw32(fe
->l_ad
);
2376 dscr
= (union dscrptr
*) efe
;
2377 icbtag
= &efe
->icbtag
;
2378 inflen
= udf_rw64(efe
->inf_len
);
2379 objsize
= udf_rw64(efe
->obj_size
);
2380 dscr_size
= sizeof(struct extfile_entry
) -1;
2381 l_ea
= udf_rw32(efe
->l_ea
);
2382 l_ad
= udf_rw32(efe
->l_ad
);
2384 data_pos
= (uint8_t *) dscr
+ dscr_size
+ l_ea
;
2385 max_l_ad
= lb_size
- dscr_size
- l_ea
;
2387 icbflags
= udf_rw16(icbtag
->flags
);
2388 addr_type
= icbflags
& UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
2391 size_diff
= new_size
- old_size
;
2393 DPRINTF(ALLOC
, ("\tfrom %"PRIu64
" to %"PRIu64
"\n", old_size
, new_size
));
2395 evacuated_data
= NULL
;
2396 if (addr_type
== UDF_ICB_INTERN_ALLOC
) {
2397 if (l_ad
+ size_diff
<= max_l_ad
) {
2398 /* only reflect size change directly in the node */
2399 inflen
+= size_diff
;
2400 objsize
+= size_diff
;
2402 crclen
= dscr_size
- UDF_DESC_TAG_LENGTH
+ l_ea
+ l_ad
;
2404 fe
->inf_len
= udf_rw64(inflen
);
2405 fe
->l_ad
= udf_rw32(l_ad
);
2406 fe
->tag
.desc_crc_len
= udf_rw16(crclen
);
2408 efe
->inf_len
= udf_rw64(inflen
);
2409 efe
->obj_size
= udf_rw64(objsize
);
2410 efe
->l_ad
= udf_rw32(l_ad
);
2411 efe
->tag
.desc_crc_len
= udf_rw16(crclen
);
2415 /* set new size for uvm */
2416 uvm_vnp_setsize(vp
, old_size
);
2417 uvm_vnp_setwritesize(vp
, new_size
);
2420 /* zero append space in buffer */
2421 uvm_vnp_zerorange(vp
, old_size
, new_size
- old_size
);
2424 udf_node_sanity_check(udf_node
, &new_inflen
, &new_lbrec
);
2427 UDF_UNLOCK_NODE(udf_node
, 0);
2429 KASSERT(new_inflen
== orig_inflen
+ size_diff
);
2430 KASSERT(new_lbrec
== orig_lbrec
);
2431 KASSERT(new_lbrec
== 0);
2435 DPRINTF(ALLOC
, ("\tCONVERT from internal\n"));
2438 /* allocate some space and copy in the stuff to keep */
2439 evacuated_data
= malloc(lb_size
, M_UDFTEMP
, M_WAITOK
);
2440 memset(evacuated_data
, 0, lb_size
);
2442 /* node is locked, so safe to exit mutex */
2443 UDF_UNLOCK_NODE(udf_node
, 0);
2445 /* read in using the `normal' vn_rdwr() */
2446 error
= vn_rdwr(UIO_READ
, udf_node
->vnode
,
2447 evacuated_data
, old_size
, 0,
2448 UIO_SYSSPACE
, IO_ALTSEMANTICS
| IO_NODELOCKED
,
2449 FSCRED
, NULL
, NULL
);
2452 UDF_LOCK_NODE(udf_node
, 0);
2455 /* convert to a normal alloc and select type */
2456 my_part
= udf_rw16(udf_node
->loc
.loc
.part_num
);
2457 dst_part
= udf_get_record_vpart(ump
, udf_get_c_type(udf_node
));
2458 addr_type
= UDF_ICB_SHORT_ALLOC
;
2459 if (dst_part
!= my_part
)
2460 addr_type
= UDF_ICB_LONG_ALLOC
;
2462 icbflags
&= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
2463 icbflags
|= addr_type
;
2464 icbtag
->flags
= udf_rw16(icbflags
);
2466 /* wipe old descriptor space */
2467 udf_wipe_adslots(udf_node
);
2469 memset(&c_ad
, 0, sizeof(struct long_ad
));
2470 c_ad
.len
= udf_rw32(old_size
| UDF_EXT_FREE
);
2471 c_ad
.loc
.part_num
= udf_rw16(0); /* not relevant */
2472 c_ad
.loc
.lb_num
= udf_rw32(0); /* not relevant */
2476 /* goto the last entry (if any) */
2480 memset(&c_ad
, 0, sizeof(struct long_ad
));
2482 udf_get_adslot(udf_node
, slot
, &c_ad
, &eof
);
2486 len
= udf_rw32(c_ad
.len
);
2487 flags
= UDF_EXT_FLAGS(len
);
2488 len
= UDF_EXT_LEN(len
);
2490 end_foffset
= foffset
+ len
;
2491 if (flags
!= UDF_EXT_REDIRECT
)
2492 foffset
= end_foffset
;
2496 /* at end of adslots */
2498 /* special case if the old size was zero, then there is no last slot */
2499 if (old_size
== 0) {
2500 c_ad
.len
= udf_rw32(0 | UDF_EXT_FREE
);
2501 c_ad
.loc
.part_num
= udf_rw16(0); /* not relevant */
2502 c_ad
.loc
.lb_num
= udf_rw32(0); /* not relevant */
2504 /* refetch last slot */
2506 udf_get_adslot(udf_node
, slot
, &c_ad
, &eof
);
2511 * If the length of the last slot is not a multiple of lb_size, adjust
2512 * length so that it is; don't forget to adjust `append_len'! relevant for
2513 * extending existing files
2515 len
= udf_rw32(c_ad
.len
);
2516 flags
= UDF_EXT_FLAGS(len
);
2517 len
= UDF_EXT_LEN(len
);
2520 if (len
% lb_size
> 0) {
2521 lastblock_grow
= lb_size
- (len
% lb_size
);
2522 lastblock_grow
= MIN(size_diff
, lastblock_grow
);
2523 len
+= lastblock_grow
;
2524 c_ad
.len
= udf_rw32(len
| flags
);
2526 /* TODO zero appended space in buffer! */
2527 /* using uvm_vnp_zerorange(vp, old_size, new_size - old_size); ? */
2529 memset(&s_ad
, 0, sizeof(struct long_ad
));
2531 /* size_diff can be bigger than allowed, so grow in chunks */
2532 append_len
= size_diff
- lastblock_grow
;
2533 while (append_len
> 0) {
2534 chunk
= MIN(append_len
, max_len
);
2535 s_ad
.len
= udf_rw32(chunk
| UDF_EXT_FREE
);
2536 s_ad
.loc
.part_num
= udf_rw16(0);
2537 s_ad
.loc
.lb_num
= udf_rw32(0);
2539 if (udf_ads_merge(lb_size
, &c_ad
, &s_ad
)) {
2540 /* not mergeable (anymore) */
2541 error
= udf_append_adslot(udf_node
, &slot
, &c_ad
);
2546 memset(&s_ad
, 0, sizeof(struct long_ad
));
2548 append_len
-= chunk
;
2551 /* if there is a rest piece in the accumulator, append it */
2552 if (UDF_EXT_LEN(udf_rw32(c_ad
.len
)) > 0) {
2553 error
= udf_append_adslot(udf_node
, &slot
, &c_ad
);
2559 /* if there is a rest piece that didn't fit, append it */
2560 if (UDF_EXT_LEN(udf_rw32(s_ad
.len
)) > 0) {
2561 error
= udf_append_adslot(udf_node
, &slot
, &s_ad
);
2567 inflen
+= size_diff
;
2568 objsize
+= size_diff
;
2570 fe
->inf_len
= udf_rw64(inflen
);
2572 efe
->inf_len
= udf_rw64(inflen
);
2573 efe
->obj_size
= udf_rw64(objsize
);
2577 if (evacuated_data
) {
2578 /* set new write size for uvm */
2579 uvm_vnp_setwritesize(vp
, old_size
);
2581 /* write out evacuated data */
2582 error
= vn_rdwr(UIO_WRITE
, udf_node
->vnode
,
2583 evacuated_data
, old_size
, 0,
2584 UIO_SYSSPACE
, IO_ALTSEMANTICS
| IO_NODELOCKED
,
2585 FSCRED
, NULL
, NULL
);
2586 uvm_vnp_setsize(vp
, old_size
);
2591 free(evacuated_data
, M_UDFTEMP
);
2593 udf_count_alloc_exts(udf_node
);
2595 udf_node_sanity_check(udf_node
, &new_inflen
, &new_lbrec
);
2596 UDF_UNLOCK_NODE(udf_node
, 0);
2598 KASSERT(new_inflen
== orig_inflen
+ size_diff
);
2599 KASSERT(new_lbrec
== orig_lbrec
);
2604 /* --------------------------------------------------------------------- */
2607 udf_shrink_node(struct udf_node
*udf_node
, uint64_t new_size
)
2609 struct vnode
*vp
= udf_node
->vnode
;
2610 struct udf_mount
*ump
= udf_node
->ump
;
2611 struct file_entry
*fe
;
2612 struct extfile_entry
*efe
;
2613 struct icb_tag
*icbtag
;
2614 struct long_ad c_ad
, s_ad
, *node_ad_cpy
;
2615 uint64_t size_diff
, old_size
, inflen
, objsize
;
2616 uint64_t foffset
, end_foffset
;
2617 uint64_t orig_inflen
, orig_lbrec
, new_inflen
, new_lbrec
;
2618 uint32_t lb_size
, dscr_size
, crclen
;
2619 uint32_t slot_offset
, slot_offset_lb
;
2620 uint32_t len
, flags
, max_len
;
2621 uint32_t num_lb
, lb_num
;
2622 uint32_t max_l_ad
, l_ad
, l_ea
;
2625 int icbflags
, addr_type
;
2626 int slot
, cpy_slot
, cpy_slots
;
2629 DPRINTF(ALLOC
, ("udf_shrink_node\n"));
2631 UDF_LOCK_NODE(udf_node
, 0);
2632 udf_node_sanity_check(udf_node
, &orig_inflen
, &orig_lbrec
);
2634 lb_size
= udf_rw32(ump
->logical_vol
->lb_size
);
2635 max_len
= ((UDF_EXT_MAXLEN
/ lb_size
) * lb_size
);
2639 efe
= udf_node
->efe
;
2641 icbtag
= &fe
->icbtag
;
2642 inflen
= udf_rw64(fe
->inf_len
);
2644 dscr_size
= sizeof(struct file_entry
) -1;
2645 l_ea
= udf_rw32(fe
->l_ea
);
2646 l_ad
= udf_rw32(fe
->l_ad
);
2647 data_pos
= (uint8_t *) fe
+ dscr_size
+ l_ea
;
2649 icbtag
= &efe
->icbtag
;
2650 inflen
= udf_rw64(efe
->inf_len
);
2651 objsize
= udf_rw64(efe
->obj_size
);
2652 dscr_size
= sizeof(struct extfile_entry
) -1;
2653 l_ea
= udf_rw32(efe
->l_ea
);
2654 l_ad
= udf_rw32(efe
->l_ad
);
2655 data_pos
= (uint8_t *) efe
+ dscr_size
+ l_ea
;
2657 max_l_ad
= lb_size
- dscr_size
- l_ea
;
2659 icbflags
= udf_rw16(icbtag
->flags
);
2660 addr_type
= icbflags
& UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
2663 size_diff
= old_size
- new_size
;
2665 DPRINTF(ALLOC
, ("\tfrom %"PRIu64
" to %"PRIu64
"\n", old_size
, new_size
));
2667 /* shrink the node to its new size */
2668 if (addr_type
== UDF_ICB_INTERN_ALLOC
) {
2669 /* only reflect size change directly in the node */
2670 KASSERT(new_size
<= max_l_ad
);
2671 inflen
-= size_diff
;
2672 objsize
-= size_diff
;
2674 crclen
= dscr_size
- UDF_DESC_TAG_LENGTH
+ l_ea
+ l_ad
;
2676 fe
->inf_len
= udf_rw64(inflen
);
2677 fe
->l_ad
= udf_rw32(l_ad
);
2678 fe
->tag
.desc_crc_len
= udf_rw16(crclen
);
2680 efe
->inf_len
= udf_rw64(inflen
);
2681 efe
->obj_size
= udf_rw64(objsize
);
2682 efe
->l_ad
= udf_rw32(l_ad
);
2683 efe
->tag
.desc_crc_len
= udf_rw16(crclen
);
2687 /* clear the space in the descriptor */
2688 KASSERT(old_size
> new_size
);
2689 memset(data_pos
+ new_size
, 0, old_size
- new_size
);
2691 /* TODO zero appended space in buffer! */
2692 /* using uvm_vnp_zerorange(vp, old_size, old_size - new_size); ? */
2694 /* set new size for uvm */
2695 uvm_vnp_setsize(vp
, new_size
);
2697 udf_node_sanity_check(udf_node
, &new_inflen
, &new_lbrec
);
2698 UDF_UNLOCK_NODE(udf_node
, 0);
2700 KASSERT(new_inflen
== orig_inflen
- size_diff
);
2701 KASSERT(new_lbrec
== orig_lbrec
);
2702 KASSERT(new_lbrec
== 0);
2707 /* setup node cleanup extents copy space */
2708 node_ad_cpy
= malloc(lb_size
* UDF_MAX_ALLOC_EXTENTS
,
2709 M_UDFMNT
, M_WAITOK
);
2710 memset(node_ad_cpy
, 0, lb_size
* UDF_MAX_ALLOC_EXTENTS
);
2713 * Shrink the node by releasing the allocations and truncate the last
2714 * allocation to the new size. If the new size fits into the
2715 * allocation descriptor itself, transform it into an
2716 * UDF_ICB_INTERN_ALLOC.
2722 /* 1) copy till first overlap piece to the rewrite buffer */
2724 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
2727 ("Shrink node failed: "
2728 "encountered EOF\n"));
2730 goto errorout
; /* panic? */
2732 len
= udf_rw32(s_ad
.len
);
2733 flags
= UDF_EXT_FLAGS(len
);
2734 len
= UDF_EXT_LEN(len
);
2736 if (flags
== UDF_EXT_REDIRECT
) {
2741 end_foffset
= foffset
+ len
;
2742 if (end_foffset
> new_size
)
2745 node_ad_cpy
[cpy_slot
++] = s_ad
;
2747 DPRINTF(ALLOC
, ("\t1: vp %d, lb %d, len %d, flags %d "
2749 udf_rw16(s_ad
.loc
.part_num
),
2750 udf_rw32(s_ad
.loc
.lb_num
),
2751 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2752 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2754 foffset
= end_foffset
;
2757 slot_offset
= new_size
- foffset
;
2759 /* 2) trunc overlapping slot at overlap and copy it */
2760 if (slot_offset
> 0) {
2761 lb_num
= udf_rw32(s_ad
.loc
.lb_num
);
2762 vpart_num
= udf_rw16(s_ad
.loc
.part_num
);
2764 if (flags
== UDF_EXT_ALLOCATED
) {
2765 /* calculate extent in lb, and offset in lb */
2766 num_lb
= (len
+ lb_size
-1) / lb_size
;
2767 slot_offset_lb
= (slot_offset
+ lb_size
-1) / lb_size
;
2769 /* adjust our slot */
2770 lb_num
+= slot_offset_lb
;
2771 num_lb
-= slot_offset_lb
;
2773 udf_free_allocated_space(ump
, lb_num
, vpart_num
, num_lb
);
2776 s_ad
.len
= udf_rw32(slot_offset
| flags
);
2777 node_ad_cpy
[cpy_slot
++] = s_ad
;
2780 DPRINTF(ALLOC
, ("\t2: vp %d, lb %d, len %d, flags %d "
2782 udf_rw16(s_ad
.loc
.part_num
),
2783 udf_rw32(s_ad
.loc
.lb_num
),
2784 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2785 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2788 /* 3) delete remainder */
2790 udf_get_adslot(udf_node
, slot
, &s_ad
, &eof
);
2794 len
= udf_rw32(s_ad
.len
);
2795 flags
= UDF_EXT_FLAGS(len
);
2796 len
= UDF_EXT_LEN(len
);
2798 if (flags
== UDF_EXT_REDIRECT
) {
2803 DPRINTF(ALLOC
, ("\t3: delete remainder "
2804 "vp %d lb %d, len %d, flags %d\n",
2805 udf_rw16(s_ad
.loc
.part_num
),
2806 udf_rw32(s_ad
.loc
.lb_num
),
2807 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2808 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2810 if (flags
== UDF_EXT_ALLOCATED
) {
2811 lb_num
= udf_rw32(s_ad
.loc
.lb_num
);
2812 vpart_num
= udf_rw16(s_ad
.loc
.part_num
);
2813 num_lb
= (len
+ lb_size
- 1) / lb_size
;
2815 udf_free_allocated_space(ump
, lb_num
, vpart_num
,
2822 /* 4) if it will fit into the descriptor then convert */
2823 if (new_size
< max_l_ad
) {
2825 * rescue/evacuate old piece by reading it in, and convert it
2826 * to internal alloc.
2828 if (new_size
== 0) {
2829 /* XXX/TODO only for zero sizing now */
2830 udf_wipe_adslots(udf_node
);
2832 icbflags
&= ~UDF_ICB_TAG_FLAGS_ALLOC_MASK
;
2833 icbflags
|= UDF_ICB_INTERN_ALLOC
;
2834 icbtag
->flags
= udf_rw16(icbflags
);
2836 inflen
-= size_diff
; KASSERT(inflen
== 0);
2837 objsize
-= size_diff
;
2839 crclen
= dscr_size
- UDF_DESC_TAG_LENGTH
+ l_ea
+ l_ad
;
2841 fe
->inf_len
= udf_rw64(inflen
);
2842 fe
->l_ad
= udf_rw32(l_ad
);
2843 fe
->tag
.desc_crc_len
= udf_rw16(crclen
);
2845 efe
->inf_len
= udf_rw64(inflen
);
2846 efe
->obj_size
= udf_rw64(objsize
);
2847 efe
->l_ad
= udf_rw32(l_ad
);
2848 efe
->tag
.desc_crc_len
= udf_rw16(crclen
);
2850 /* eventually copy in evacuated piece */
2851 /* set new size for uvm */
2852 uvm_vnp_setsize(vp
, new_size
);
2854 free(node_ad_cpy
, M_UDFMNT
);
2855 udf_node_sanity_check(udf_node
, &new_inflen
, &new_lbrec
);
2857 UDF_UNLOCK_NODE(udf_node
, 0);
2859 KASSERT(new_inflen
== orig_inflen
- size_diff
);
2860 KASSERT(new_inflen
== 0);
2861 KASSERT(new_lbrec
== 0);
2866 printf("UDF_SHRINK_NODE: could convert to internal alloc!\n");
2869 /* 5) reset node descriptors */
2870 udf_wipe_adslots(udf_node
);
2872 /* 6) copy back extents; merge when possible. Recounting on the fly */
2873 cpy_slots
= cpy_slot
;
2875 c_ad
= node_ad_cpy
[0];
2877 for (cpy_slot
= 1; cpy_slot
< cpy_slots
; cpy_slot
++) {
2878 s_ad
= node_ad_cpy
[cpy_slot
];
2880 DPRINTF(ALLOC
, ("\t6: stack -> got mapping vp %d "
2881 "lb %d, len %d, flags %d\n",
2882 udf_rw16(s_ad
.loc
.part_num
),
2883 udf_rw32(s_ad
.loc
.lb_num
),
2884 UDF_EXT_LEN(udf_rw32(s_ad
.len
)),
2885 UDF_EXT_FLAGS(udf_rw32(s_ad
.len
)) >> 30));
2887 /* see if we can merge */
2888 if (udf_ads_merge(lb_size
, &c_ad
, &s_ad
)) {
2889 /* not mergeable (anymore) */
2890 DPRINTF(ALLOC
, ("\t6: appending vp %d lb %d, "
2891 "len %d, flags %d\n",
2892 udf_rw16(c_ad
.loc
.part_num
),
2893 udf_rw32(c_ad
.loc
.lb_num
),
2894 UDF_EXT_LEN(udf_rw32(c_ad
.len
)),
2895 UDF_EXT_FLAGS(udf_rw32(c_ad
.len
)) >> 30));
2897 error
= udf_append_adslot(udf_node
, &slot
, &c_ad
);
2899 goto errorout
; /* panic? */
2905 /* 7) push rest slot (if any) */
2906 if (UDF_EXT_LEN(c_ad
.len
) > 0) {
2907 DPRINTF(ALLOC
, ("\t7: last append vp %d lb %d, "
2908 "len %d, flags %d\n",
2909 udf_rw16(c_ad
.loc
.part_num
),
2910 udf_rw32(c_ad
.loc
.lb_num
),
2911 UDF_EXT_LEN(udf_rw32(c_ad
.len
)),
2912 UDF_EXT_FLAGS(udf_rw32(c_ad
.len
)) >> 30));
2914 error
= udf_append_adslot(udf_node
, &slot
, &c_ad
);
2916 goto errorout
; /* panic? */
2920 inflen
-= size_diff
;
2921 objsize
-= size_diff
;
2923 fe
->inf_len
= udf_rw64(inflen
);
2925 efe
->inf_len
= udf_rw64(inflen
);
2926 efe
->obj_size
= udf_rw64(objsize
);
2930 /* set new size for uvm */
2931 uvm_vnp_setsize(vp
, new_size
);
2934 free(node_ad_cpy
, M_UDFMNT
);
2936 udf_count_alloc_exts(udf_node
);
2938 udf_node_sanity_check(udf_node
, &new_inflen
, &new_lbrec
);
2939 UDF_UNLOCK_NODE(udf_node
, 0);
2941 KASSERT(new_inflen
== orig_inflen
- size_diff
);