4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #include <sys/types.h>
28 #include <sys/t_lock.h>
29 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/sysmacros.h>
33 #include <sys/resource.h>
34 #include <sys/signal.h>
40 #include <sys/vnode.h>
45 #include <sys/fcntl.h>
46 #include <sys/flock.h>
51 #include <sys/errno.h>
54 #include <sys/pathname.h>
55 #include <sys/debug.h>
56 #include <sys/vmsystm.h>
57 #include <sys/cmn_err.h>
58 #include <sys/dirent.h>
59 #include <sys/errno.h>
60 #include <sys/modctl.h>
61 #include <sys/statvfs.h>
62 #include <sys/mount.h>
63 #include <sys/sunddi.h>
64 #include <sys/bootconf.h>
65 #include <sys/policy.h>
72 #include <vm/seg_map.h>
73 #include <vm/seg_kmem.h>
74 #include <vm/seg_vn.h>
79 #include <sys/fs_subr.h>
81 #include <sys/fs/udf_volume.h>
82 #include <sys/fs/udf_inode.h>
/*
 * Forward declarations for the allocation helpers in this file.
 * ud_search_icache() is defined elsewhere (inode cache lookup by
 * partition reference number and block).
 */
extern struct ud_inode *ud_search_icache(struct vfs *, uint16_t, uint32_t);

int32_t ud_alloc_space_bmap(struct vfs *, struct ud_part *,
	uint32_t, uint32_t, uint32_t *, uint32_t *, int32_t);
int32_t ud_check_free_and_mark_used(struct vfs *,
	struct ud_part *, uint32_t, uint32_t *);
int32_t ud_check_free(uint8_t *, uint8_t *, uint32_t, uint32_t);
void ud_mark_used(uint8_t *, uint32_t, uint32_t);
void ud_mark_free(uint8_t *, uint32_t, uint32_t);
int32_t ud_alloc_space_stbl(struct vfs *, struct ud_part *,
	uint32_t, uint32_t, uint32_t *, uint32_t *, int32_t);
int32_t ud_free_space_bmap(struct vfs *,
	struct ud_part *, uint32_t, uint32_t);
int32_t ud_free_space_stbl(struct vfs *,
	struct ud_part *, uint32_t, uint32_t);
104 * WORKAROUND to the buffer cache crap
105 * If the requested block exists in the buffer cache
106 * buffer cache does not care about the count
107 * it just returns the old buffer(does not even
108 * set resid value). Same problem exists if the
109 * block that is requested is not the first block
110 * in the cached buffer then this will return
111 * a different buffer. We work around the above by
112 * using a fixed size request to the buffer cache
113 * all the time. This is currently udf_lbsize.
114 * (Actually it is restricted to udf_lbsize
115 * because iget always does udf_lbsize requests)
120 * allocate blkcount blocks continuously
121 * near "proximity" block in partition defined by prn.
122 * if proximity != 0 means less_is_ok = 0
123 * return the starting block no and count
124 * of blocks allocated in start_blkno & size
125 * if less_is_ok == 0 then allocate only if
126 * entire requirement can be met.
129 ud_alloc_space(struct vfs
*vfsp
, uint16_t prn
,
130 uint32_t proximity
, uint32_t blkcount
,
131 uint32_t *start_blkno
, uint32_t *size
,
132 int32_t less_is_ok
, int32_t metadata
)
134 int32_t i
, error
= 0;
135 struct udf_vfs
*udf_vfsp
;
136 struct ud_part
*ud_part
;
138 ud_printf("ud_alloc_space\n");
142 * prom_printf("ud_alloc_space %x %x %x %x\n",
143 * proximity, blkcount, less_is_ok, metadata);
152 udf_vfsp
= (struct udf_vfs
*)vfsp
->vfs_data
;
153 ud_part
= udf_vfsp
->udf_parts
;
154 for (i
= 0; i
< udf_vfsp
->udf_npart
; i
++) {
155 if (prn
== ud_part
->udp_number
) {
161 if (i
== udf_vfsp
->udf_npart
) {
167 error
= ud_alloc_from_cache(udf_vfsp
, ud_part
, start_blkno
);
173 if (ud_part
->udp_nfree
!= 0) {
174 if (ud_part
->udp_flags
== UDP_BITMAPS
) {
175 error
= ud_alloc_space_bmap(vfsp
, ud_part
, proximity
,
176 blkcount
, start_blkno
, size
, less_is_ok
);
178 error
= ud_alloc_space_stbl(vfsp
, ud_part
, proximity
,
179 blkcount
, start_blkno
, size
, less_is_ok
);
182 mutex_enter(&udf_vfsp
->udf_lock
);
183 ASSERT(ud_part
->udp_nfree
>= *size
);
184 ASSERT(udf_vfsp
->udf_freeblks
>= *size
);
185 ud_part
->udp_nfree
-= *size
;
186 udf_vfsp
->udf_freeblks
-= *size
;
187 mutex_exit(&udf_vfsp
->udf_lock
);
193 * prom_printf("end %x %x %x\n", error, *start_blkno, *size);
#ifdef	SKIP_USED_BLOCKS
/*
 * skip[b] is the number of consecutive zero bits at the low end of the
 * byte value b, with skip[0] == 8 (the data below encodes exactly the
 * trailing-zero-bit count of each index).  In this bitmap a zero bit is
 * an allocated block, so the scan loop uses this table to jump over
 * runs of allocated blocks a byte at a time.
 * This table is manually constructed.
 */
static const uint8_t skip[256] = {
	8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
#endif

/*
 * The space bitmap begins with a descriptor header; block numbers within
 * the bitmap are offset by HDR_BLKS bits (24 bytes * 8 bits/byte).
 * NOTE(review): 24 presumably is the on-disk bitmap descriptor header
 * size in bytes — confirm against the UDF space bitmap descriptor layout.
 */
#define	HDR_BLKS	(24 * 8)
226 ud_alloc_space_bmap(struct vfs
*vfsp
,
227 struct ud_part
*ud_part
, uint32_t proximity
,
228 uint32_t blkcount
, uint32_t *start_blkno
,
229 uint32_t *size
, int32_t less_is_ok
)
231 struct buf
*bp
= NULL
;
232 struct udf_vfs
*udf_vfsp
;
233 uint32_t old_loc
, old_size
, new_size
;
234 uint8_t *addr
, *eaddr
;
235 uint32_t loop_count
, loop_begin
, loop_end
;
236 uint32_t bno
, begin
, dummy
, temp
, lbsz
, bb_count
;
237 uint32_t bblk
= 0, eblk
= 0;
240 ud_printf("ud_alloc_space_bmap\n");
243 ASSERT(ud_part
->udp_flags
== UDP_BITMAPS
);
245 if (ud_part
->udp_unall_len
== 0) {
248 udf_vfsp
= (struct udf_vfs
*)vfsp
->vfs_data
;
249 lbsz
= udf_vfsp
->udf_lbsize
;
250 bb_count
= udf_vfsp
->udf_lbsize
<< 3;
252 if (proximity
!= 0) {
254 * directly try allocating
258 if (ud_check_free_and_mark_used(vfsp
,
259 ud_part
, proximity
, &temp
) == 0) {
261 *start_blkno
= proximity
;
270 mutex_enter(&udf_vfsp
->udf_lock
);
271 fragmented
= udf_vfsp
->udf_fragmented
;
272 mutex_exit(&udf_vfsp
->udf_lock
);
274 old_loc
= old_size
= 0;
276 mutex_enter(&udf_vfsp
->udf_lock
);
277 loop_begin
= (ud_part
->udp_last_alloc
+ CLSTR_MASK
) & ~CLSTR_MASK
;
278 mutex_exit(&udf_vfsp
->udf_lock
);
280 loop_end
= ud_part
->udp_nblocks
+ HDR_BLKS
;
281 loop_count
= (loop_begin
) ? 2 : 1;
282 while (loop_count
--) {
283 for (bno
= loop_begin
+ HDR_BLKS
; bno
+ blkcount
< loop_end
; ) {
287 * Each bread is restricted to lbsize
288 * due to the way bread is implemented
291 ((eblk
- bno
) < blkcount
)) {
295 begin
= ud_part
->udp_unall_loc
+
297 bp
= ud_bread(vfsp
->vfs_dev
,
298 ud_xlate_to_daddr(udf_vfsp
,
301 udf_vfsp
->udf_l2d_shift
, lbsz
);
302 if (bp
->b_flags
& B_ERROR
) {
306 bblk
= begin
* bb_count
;
307 eblk
= bblk
+ bb_count
;
308 addr
= (uint8_t *)bp
->b_un
.b_addr
;
309 eaddr
= addr
+ bp
->b_bcount
;
312 if (blkcount
> (eblk
- bno
)) {
317 if ((new_size
= ud_check_free(addr
, eaddr
,
318 bno
- bblk
, temp
)) == temp
) {
319 ud_mark_used(addr
, bno
- bblk
, temp
);
321 *start_blkno
= bno
- HDR_BLKS
;
323 mutex_enter(&udf_vfsp
->udf_lock
);
324 ud_part
->udp_last_alloc
=
325 bno
+ temp
- HDR_BLKS
;
326 mutex_exit(&udf_vfsp
->udf_lock
);
330 if (old_size
< new_size
) {
331 old_loc
= bno
- HDR_BLKS
;
338 #ifdef SKIP_USED_BLOCKS
341 * implement a allocated block skip
342 * using a while loop with an
343 * preinitialised array of 256 elements
344 * for number of blocks skipped
347 while (skip
[addr
[(bno
- bblk
) >> 3]] == 8)
349 bno
+= skip
[addr
[(bno
- bblk
) >> 3]];
355 bno
= (bno
+ CLSTR_MASK
) & ~CLSTR_MASK
;
363 loop_end
= loop_begin
+ HDR_BLKS
;
367 if ((old_size
== 0) && (!fragmented
)) {
368 mutex_enter(&udf_vfsp
->udf_lock
);
369 fragmented
= udf_vfsp
->udf_fragmented
= 1;
370 mutex_exit(&udf_vfsp
->udf_lock
);
373 if (less_is_ok
&& (old_size
!= 0)) {
377 * somebody else might have
378 * already allocated behind us
380 if (ud_check_free_and_mark_used(vfsp
,
381 ud_part
, old_loc
, &old_size
) == 0) {
383 *start_blkno
= old_loc
;
385 mutex_enter(&udf_vfsp
->udf_lock
);
386 ud_part
->udp_last_alloc
= old_loc
+ old_size
;
387 mutex_exit(&udf_vfsp
->udf_lock
);
393 * Failed what ever the reason
401 * start is the block from the begining
402 * of the partition ud_part
405 ud_check_free_and_mark_used(struct vfs
*vfsp
,
406 struct ud_part
*ud_part
, uint32_t start
, uint32_t *count
)
409 struct udf_vfs
*udf_vfsp
;
410 uint32_t begin
, dummy
, bb_count
;
413 * Adjust start for the header
416 udf_vfsp
= (struct udf_vfs
*)vfsp
->vfs_data
;
417 bb_count
= udf_vfsp
->udf_lbsize
<< 3;
420 * Read just on block worth of bitmap
422 begin
= ud_part
->udp_unall_loc
+ (start
/ bb_count
);
423 bp
= ud_bread(vfsp
->vfs_dev
,
424 ud_xlate_to_daddr(udf_vfsp
, ud_part
->udp_number
,
425 begin
, 1, &dummy
) << udf_vfsp
->udf_l2d_shift
,
426 udf_vfsp
->udf_lbsize
);
427 if (bp
->b_flags
& B_ERROR
) {
433 * Adjust the count if necessary
435 start
-= begin
* bb_count
;
436 if ((start
+ *count
) > bb_count
) {
437 *count
= bb_count
- start
;
440 if (ud_check_free((uint8_t *)bp
->b_un
.b_addr
,
441 (uint8_t *)bp
->b_un
.b_addr
+ bp
->b_bcount
, start
,
446 ud_mark_used((uint8_t *)bp
->b_un
.b_addr
, start
, *count
);
/*
 * Count how many consecutive free blocks exist in the bitmap starting
 * at bit "start", up to a maximum of "count".  A set bit (1) marks a
 * free block, a clear bit (0) an allocated one (see ud_mark_free()
 * and ud_mark_used()).  Scanning stops early at an allocated block or
 * when the byte containing "start" would reach "eaddr" (end of the
 * buffered bitmap block).
 *
 * Returns the number of free blocks found (0..count).
 */
int32_t
ud_check_free(uint8_t *addr, uint8_t *eaddr, uint32_t start, uint32_t count)
{
	int32_t i = 0;

	for (i = 0; i < count; i++) {
		/* Do not read past the end of the bitmap buffer */
		if (&addr[start >> 3] >= eaddr) {
			break;
		}
		/* A clear bit means the block is already allocated */
		if ((addr[start >> 3] & (1 << (start & 0x7))) == 0) {
			break;
		}
		start++;
	}
	return (i);
}
/*
 * Mark "count" blocks allocated in the bitmap, beginning at bit
 * "start": clear each bit (0 == allocated).  The caller must have
 * verified the range fits inside the buffered bitmap block
 * (see ud_check_free()).
 */
void
ud_mark_used(uint8_t *addr, uint32_t start, uint32_t count)
{
	int32_t i;

	for (i = 0; i < count; i++) {
		addr[start >> 3] &= ~(1 << (start & 0x7));
		start++;
	}
}
/*
 * Mark "count" blocks free in the bitmap, beginning at bit "start":
 * set each bit (1 == free).  Inverse of ud_mark_used(); the caller
 * is responsible for keeping the range inside the buffered bitmap
 * block.
 */
void
ud_mark_free(uint8_t *addr, uint32_t start, uint32_t count)
{
	int32_t i;

	for (i = 0; i < count; i++) {
		addr[start >> 3] |= (1 << (start & 0x7));
		start++;
	}
}
493 ud_alloc_space_stbl(struct vfs
*vfsp
,
494 struct ud_part
*ud_part
, uint32_t proximity
,
495 uint32_t blkcount
, uint32_t *start_blkno
,
496 uint32_t *size
, int32_t less_is_ok
)
500 int32_t error
, index
, count
, larg_index
, larg_sz
;
502 struct udf_vfs
*udf_vfsp
;
503 struct unall_space_ent
*use
;
506 ASSERT(ud_part
->udp_flags
== UDP_SPACETBLS
);
508 ud_printf("ud_alloc_space_stbl\n");
510 if (ud_part
->udp_unall_len
== 0) {
514 udf_vfsp
= (struct udf_vfs
*)vfsp
->vfs_data
;
515 ASSERT((ud_part
->udp_unall_len
+ 40) <= udf_vfsp
->udf_lbsize
);
517 bp
= ud_bread(vfsp
->vfs_dev
,
518 ud_xlate_to_daddr(udf_vfsp
, ud_part
->udp_number
,
519 ud_part
->udp_unall_loc
, 1, &temp
), udf_vfsp
->udf_lbsize
);
521 use
= (struct unall_space_ent
*)bp
->b_un
.b_addr
;
522 sz
= SWAP_32(use
->use_len_ad
);
523 adesc
= SWAP_16(use
->use_icb_tag
.itag_flags
) & 0x7;
524 if (adesc
== ICB_FLAG_SHORT_AD
) {
525 struct short_ad
*sad
;
527 sad
= (struct short_ad
*)use
->use_ad
;
528 count
= sz
/ sizeof (struct short_ad
);
531 * Search the entire list for
532 * a extent which can give the entire data
535 larg_index
= larg_sz
= 0;
536 for (index
= 0; index
< count
; index
++, sad
++) {
537 temp
= SWAP_32(sad
->sad_ext_len
) >>
538 udf_vfsp
->udf_l2b_shift
;
539 if (temp
== blkcount
) {
541 * We found the right fit
542 * return the values and
549 } else if (temp
> blkcount
) {
551 * We found an entry larger than the
552 * requirement. Change the start block
553 * number and the count to reflect the
556 *start_blkno
= SWAP_32(sad
->sad_ext_loc
);
558 temp
= (temp
- blkcount
) <<
559 udf_vfsp
->udf_l2b_shift
;
560 sad
->sad_ext_len
= SWAP_32(temp
);
561 temp
= SWAP_32(sad
->sad_ext_loc
) + blkcount
;
562 sad
->sad_ext_loc
= SWAP_32(temp
);
566 * Let us keep track of the largest
567 * extent available if less_is_ok.
570 if (temp
> larg_sz
) {
577 if ((less_is_ok
) && (larg_sz
!= 0)) {
579 * If we came here we could
580 * not find a extent to cover the entire size
581 * return whatever could be allocated
582 * and compress the table
584 sad
= (struct short_ad
*)use
->use_ad
;
586 *start_blkno
= SWAP_32(sad
->sad_ext_loc
);
588 for (index
= larg_index
; index
< count
;
592 sz
-= sizeof (struct short_ad
);
593 use
->use_len_ad
= SWAP_32(sz
);
598 } else if (adesc
== ICB_FLAG_LONG_AD
) {
601 lad
= (struct long_ad
*)use
->use_ad
;
602 count
= sz
/ sizeof (struct long_ad
);
605 * Search the entire list for
606 * a extent which can give the entire data
609 larg_index
= larg_sz
= 0;
610 for (index
= 0; index
< count
; index
++, lad
++) {
611 temp
= SWAP_32(lad
->lad_ext_len
) >>
612 udf_vfsp
->udf_l2b_shift
;
613 if (temp
== blkcount
) {
615 * We found the right fit
616 * return the values and
623 } else if (temp
> blkcount
) {
625 * We found an entry larger than the
626 * requirement. Change the start block
627 * number and the count to reflect the
630 *start_blkno
= SWAP_32(lad
->lad_ext_loc
);
632 temp
= (temp
- blkcount
) <<
633 udf_vfsp
->udf_l2b_shift
;
634 lad
->lad_ext_len
= SWAP_32(temp
);
635 temp
= SWAP_32(lad
->lad_ext_loc
) + blkcount
;
636 lad
->lad_ext_loc
= SWAP_32(temp
);
640 * Let us keep track of the largest
641 * extent available if less_is_ok.
644 if (temp
> larg_sz
) {
651 if ((less_is_ok
) && (larg_sz
!= 0)) {
653 * If we came here we could
654 * not find a extent to cover the entire size
655 * return whatever could be allocated
656 * and compress the table
658 lad
= (struct long_ad
*)use
->use_ad
;
660 *start_blkno
= SWAP_32(lad
->lad_ext_loc
);
662 for (index
= larg_index
; index
< count
;
666 sz
-= sizeof (struct long_ad
);
667 use
->use_len_ad
= SWAP_32(sz
);
686 * release blkcount blocks starting from beginblk
687 * Call appropriate bmap/space table functions
690 ud_free_space(struct vfs
*vfsp
, uint16_t prn
,
691 uint32_t beginblk
, uint32_t blkcount
)
694 struct ud_part
*ud_part
;
695 struct udf_vfs
*udf_vfsp
;
697 ud_printf("ud_free_space\n");
703 udf_vfsp
= (struct udf_vfs
*)vfsp
->vfs_data
;
704 ud_part
= udf_vfsp
->udf_parts
;
705 for (i
= 0; i
< udf_vfsp
->udf_npart
; i
++) {
706 if (prn
== ud_part
->udp_number
) {
712 if (i
== udf_vfsp
->udf_npart
) {
716 if (ud_part
->udp_flags
== UDP_BITMAPS
) {
717 error
= ud_free_space_bmap(vfsp
, ud_part
, beginblk
, blkcount
);
719 error
= ud_free_space_stbl(vfsp
, ud_part
, beginblk
, blkcount
);
723 udf_vfsp
->udf_mark_bad
= 1;
728 * If there is a freed table then
729 * release blocks to the freed table
730 * otherwise release to the unallocated table.
731 * Findout the offset into the bitmap and
732 * mark the blocks as free blocks
735 ud_free_space_bmap(struct vfs
*vfsp
,
736 struct ud_part
*ud_part
,
737 uint32_t beginblk
, uint32_t blkcount
)
740 struct udf_vfs
*udf_vfsp
;
741 uint32_t block
, begin
, end
, blkno
, count
, map_end_blk
, dummy
;
743 ud_printf("ud_free_space_bmap\n");
746 ASSERT(ud_part
->udp_flags
== UDP_BITMAPS
);
748 * prom_printf("%x %x\n", udblock, udcount);
751 udf_vfsp
= (struct udf_vfs
*)vfsp
->vfs_data
;
752 if ((ud_part
->udp_freed_len
== 0) &&
753 (ud_part
->udp_unall_len
== 0)) {
757 * decide unallocated/freed table to use
759 if (ud_part
->udp_freed_len
== 0) {
760 begin
= ud_part
->udp_unall_loc
;
761 map_end_blk
= ud_part
->udp_unall_len
<< 3;
763 begin
= ud_part
->udp_freed_loc
;
764 map_end_blk
= ud_part
->udp_freed_len
<< 3;
767 if (beginblk
+ blkcount
> map_end_blk
) {
771 /* adjust for the bitmap header */
772 beginblk
+= HDR_BLKS
;
774 end
= begin
+ ((beginblk
+ blkcount
) / (udf_vfsp
->udf_lbsize
<< 3));
775 begin
+= (beginblk
/ (udf_vfsp
->udf_lbsize
<< 3));
777 for (block
= begin
; block
<= end
; block
++) {
779 bp
= ud_bread(vfsp
->vfs_dev
,
780 ud_xlate_to_daddr(udf_vfsp
, ud_part
->udp_number
, block
, 1,
781 &dummy
) << udf_vfsp
->udf_l2d_shift
, udf_vfsp
->udf_lbsize
);
782 if (bp
->b_flags
& B_ERROR
) {
788 mutex_enter(&udf_vfsp
->udf_lock
);
791 * add freed blocks to the bitmap
794 blkno
= beginblk
- (block
* (udf_vfsp
->udf_lbsize
<< 3));
795 if (blkno
+ blkcount
> (udf_vfsp
->udf_lbsize
<< 3)) {
796 count
= (udf_vfsp
->udf_lbsize
<< 3) - blkno
;
802 * if (begin != end) {
803 * printf("%x %x %x %x %x %x\n",
804 * begin, end, block, blkno, count);
805 * printf("%x %x %x\n", bp->b_un.b_addr, blkno, count);
809 ud_mark_free((uint8_t *)bp
->b_un
.b_addr
, blkno
, count
);
814 if (ud_part
->udp_freed_len
== 0) {
815 ud_part
->udp_nfree
+= count
;
816 udf_vfsp
->udf_freeblks
+= count
;
818 mutex_exit(&udf_vfsp
->udf_lock
);
829 * search the entire table if there is
830 * a entry with which we can merge the
831 * current entry. Otherwise create
832 * a new entry at the end of the table
835 ud_free_space_stbl(struct vfs
*vfsp
,
836 struct ud_part
*ud_part
,
837 uint32_t beginblk
, uint32_t blkcount
)
840 int32_t error
= 0, index
, count
;
841 uint32_t block
, dummy
, sz
;
843 struct udf_vfs
*udf_vfsp
;
844 struct unall_space_ent
*use
;
846 ud_printf("ud_free_space_stbl\n");
849 ASSERT(ud_part
->udp_flags
== UDP_SPACETBLS
);
851 if ((ud_part
->udp_freed_len
== 0) && (ud_part
->udp_unall_len
== 0)) {
855 if (ud_part
->udp_freed_len
!= 0) {
856 block
= ud_part
->udp_freed_loc
;
858 block
= ud_part
->udp_unall_loc
;
861 udf_vfsp
= (struct udf_vfs
*)vfsp
->vfs_data
;
862 ASSERT((ud_part
->udp_unall_len
+ 40) <= udf_vfsp
->udf_lbsize
);
864 bp
= ud_bread(vfsp
->vfs_dev
,
865 ud_xlate_to_daddr(udf_vfsp
, ud_part
->udp_number
, block
, 1, &dummy
),
866 udf_vfsp
->udf_lbsize
);
868 use
= (struct unall_space_ent
*)bp
->b_un
.b_addr
;
869 sz
= SWAP_32(use
->use_len_ad
);
870 adesc
= SWAP_16(use
->use_icb_tag
.itag_flags
) & 0x7;
871 if (adesc
== ICB_FLAG_SHORT_AD
) {
872 struct short_ad
*sad
;
874 sad
= (struct short_ad
*)use
->use_ad
;
875 count
= sz
/ sizeof (struct short_ad
);
877 * Check if the blocks being freed
878 * are continuous with any of the
881 for (index
= 0; index
< count
; index
++, sad
++) {
882 if (beginblk
== (SWAP_32(sad
->sad_ext_loc
) +
883 (SWAP_32(sad
->sad_ext_len
) /
884 udf_vfsp
->udf_lbsize
))) {
885 dummy
= SWAP_32(sad
->sad_ext_len
) +
886 blkcount
* udf_vfsp
->udf_lbsize
;
887 sad
->sad_ext_len
= SWAP_32(dummy
);
889 } else if ((beginblk
+ blkcount
) ==
890 SWAP_32(sad
->sad_ext_loc
)) {
891 sad
->sad_ext_loc
= SWAP_32(beginblk
);
897 * We need to add a new entry
900 if ((40 + sz
+ sizeof (struct short_ad
)) >
901 udf_vfsp
->udf_lbsize
) {
907 * We have enough space
908 * just add the entry at the end
910 dummy
= SWAP_32(use
->use_len_ad
);
911 sad
= (struct short_ad
*)&use
->use_ad
[dummy
];
912 sz
= blkcount
* udf_vfsp
->udf_lbsize
;
913 sad
->sad_ext_len
= SWAP_32(sz
);
914 sad
->sad_ext_loc
= SWAP_32(beginblk
);
915 dummy
+= sizeof (struct short_ad
);
916 use
->use_len_ad
= SWAP_32(dummy
);
917 } else if (adesc
== ICB_FLAG_LONG_AD
) {
920 lad
= (struct long_ad
*)use
->use_ad
;
921 count
= sz
/ sizeof (struct long_ad
);
923 * Check if the blocks being freed
924 * are continuous with any of the
927 for (index
= 0; index
< count
; index
++, lad
++) {
928 if (beginblk
== (SWAP_32(lad
->lad_ext_loc
) +
929 (SWAP_32(lad
->lad_ext_len
) /
930 udf_vfsp
->udf_lbsize
))) {
931 dummy
= SWAP_32(lad
->lad_ext_len
) +
932 blkcount
* udf_vfsp
->udf_lbsize
;
933 lad
->lad_ext_len
= SWAP_32(dummy
);
935 } else if ((beginblk
+ blkcount
) ==
936 SWAP_32(lad
->lad_ext_loc
)) {
937 lad
->lad_ext_loc
= SWAP_32(beginblk
);
943 * We need to add a new entry
946 if ((40 + sz
+ sizeof (struct long_ad
)) >
947 udf_vfsp
->udf_lbsize
) {
953 * We have enough space
954 * just add the entry at the end
956 dummy
= SWAP_32(use
->use_len_ad
);
957 lad
= (struct long_ad
*)&use
->use_ad
[dummy
];
958 sz
= blkcount
* udf_vfsp
->udf_lbsize
;
959 lad
->lad_ext_len
= SWAP_32(sz
);
960 lad
->lad_ext_loc
= SWAP_32(beginblk
);
961 lad
->lad_ext_prn
= SWAP_16(ud_part
->udp_number
);
962 dummy
+= sizeof (struct long_ad
);
963 use
->use_len_ad
= SWAP_32(dummy
);
980 ud_ialloc(struct ud_inode
*pip
,
981 struct ud_inode
**ipp
, struct vattr
*vap
, struct cred
*cr
)
984 uint32_t blkno
, size
, loc
;
985 uint32_t imode
, ichar
, lbsize
, ea_len
, dummy
;
988 struct file_entry
*fe
;
989 struct timespec32 time
;
990 struct timespec32 settime
;
992 struct ext_attr_hdr
*eah
;
993 struct dev_spec_ear
*ds
;
994 struct udf_vfs
*udf_vfsp
;
1001 ASSERT(vap
!= NULL
);
1003 ud_printf("ud_ialloc\n");
1005 if (((vap
->va_mask
& AT_ATIME
) && TIMESPEC_OVERFLOW(&vap
->va_atime
)) ||
1006 ((vap
->va_mask
& AT_MTIME
) && TIMESPEC_OVERFLOW(&vap
->va_mtime
)))
1009 udf_vfsp
= pip
->i_udf
;
1010 lbsize
= udf_vfsp
->udf_lbsize
;
1011 prn
= pip
->i_icb_prn
;
1013 if ((err
= ud_alloc_space(pip
->i_vfs
, prn
,
1014 0, 1, &blkno
, &size
, 0, 1)) != 0) {
1017 loc
= ud_xlate_to_daddr(udf_vfsp
, prn
, blkno
, 1, &dummy
);
1020 bp
= ud_bread(pip
->i_dev
, loc
<< udf_vfsp
->udf_l2d_shift
, lbsize
);
1021 if (bp
->b_flags
& B_ERROR
) {
1022 ud_free_space(pip
->i_vfs
, prn
, blkno
, size
);
1025 bzero(bp
->b_un
.b_addr
, bp
->b_bcount
);
1026 fe
= (struct file_entry
*)bp
->b_un
.b_addr
;
1029 fe
->fe_uid
= SWAP_32(uid
);
1032 * To determine the group-id of the created file:
1033 * 1) If the gid is set in the attribute list (non-Sun & pre-4.0
1034 * clients are not likely to set the gid), then use it if
1035 * the process is privileged, belongs to the target group,
1036 * or the group is the same as the parent directory.
1037 * 2) If the filesystem was not mounted with the Old-BSD-compatible
1038 * GRPID option, and the directory's set-gid bit is clear,
1039 * then use the process's gid.
1040 * 3) Otherwise, set the group-id to the gid of the parent directory.
1042 if ((vap
->va_mask
& AT_GID
) &&
1043 ((vap
->va_gid
== pip
->i_gid
) || groupmember(vap
->va_gid
, cr
) ||
1044 secpolicy_vnode_create_gid(cr
) == 0)) {
1046 * XXX - is this only the case when a 4.0 NFS client, or a
1047 * client derived from that code, makes a call over the wire?
1049 fe
->fe_gid
= SWAP_32(vap
->va_gid
);
1052 fe
->fe_gid
= (pip
->i_char
& ISGID
) ?
1053 SWAP_32(pip
->i_gid
) : SWAP_32(gid
);
1056 imode
= MAKEIMODE(vap
->va_type
, vap
->va_mode
);
1057 ichar
= imode
& (VSUID
| VSGID
| VSVTX
);
1058 imode
= UD_UPERM2DPERM(imode
);
1061 * Under solaris only the owner can
1062 * change the attributes of files so set
1063 * the change attribute bit only for user
1068 * File delete permissions on Solaris are
1069 * the permissions on the directory but not the file
1070 * when we create a file just inherit the directorys
1071 * write permission to be the file delete permissions
1072 * At least we will be consistent in the files we create
1074 imode
|= (pip
->i_perm
& (IWRITE
| IWRITE
>> 5 | IWRITE
>> 10)) << 3;
1076 fe
->fe_perms
= SWAP_32(imode
);
1079 * udf does not have a "." entry in dir's
1080 * so even directories have only one link
1082 fe
->fe_lcount
= SWAP_16(1);
1084 fe
->fe_info_len
= 0;
1088 time
.tv_sec
= now
.tv_sec
;
1089 time
.tv_nsec
= now
.tv_nsec
;
1090 if (vap
->va_mask
& AT_ATIME
) {
1091 TIMESPEC_TO_TIMESPEC32(&settime
, &vap
->va_atime
)
1092 ud_utime2dtime(&settime
, &fe
->fe_acc_time
);
1094 ud_utime2dtime(&time
, &fe
->fe_acc_time
);
1095 if (vap
->va_mask
& AT_MTIME
) {
1096 TIMESPEC_TO_TIMESPEC32(&settime
, &vap
->va_mtime
)
1097 ud_utime2dtime(&settime
, &fe
->fe_mod_time
);
1099 ud_utime2dtime(&time
, &fe
->fe_mod_time
);
1100 ud_utime2dtime(&time
, &fe
->fe_attr_time
);
1102 ud_update_regid(&fe
->fe_impl_id
);
1104 mutex_enter(&udf_vfsp
->udf_lock
);
1105 fe
->fe_uniq_id
= SWAP_64(udf_vfsp
->udf_maxuniq
);
1106 udf_vfsp
->udf_maxuniq
++;
1107 mutex_exit(&udf_vfsp
->udf_lock
);
1110 if ((vap
->va_type
== VBLK
) || (vap
->va_type
== VCHR
)) {
1111 eah
= (struct ext_attr_hdr
*)fe
->fe_spec
;
1112 ea_len
= (sizeof (struct ext_attr_hdr
) + 3) & ~3;
1113 eah
->eah_ial
= SWAP_32(ea_len
);
1115 ds
= (struct dev_spec_ear
*)&fe
->fe_spec
[ea_len
];
1116 ea_len
+= ud_make_dev_spec_ear(ds
,
1117 getmajor(vap
->va_rdev
), getminor(vap
->va_rdev
));
1118 ea_len
= (ea_len
+ 3) & ~3;
1119 eah
->eah_aal
= SWAP_32(ea_len
);
1120 ud_make_tag(udf_vfsp
, &eah
->eah_tag
,
1121 UD_EXT_ATTR_HDR
, blkno
, ea_len
);
1124 fe
->fe_len_ear
= SWAP_32(ea_len
);
1125 fe
->fe_len_adesc
= 0;
1127 icb
= &fe
->fe_icb_tag
;
1128 icb
->itag_prnde
= 0;
1129 icb
->itag_strategy
= SWAP_16(STRAT_TYPE4
);
1130 icb
->itag_param
= 0;
1131 icb
->itag_max_ent
= SWAP_16(1);
1132 switch (vap
->va_type
) {
1134 icb
->itag_ftype
= FTYPE_FILE
;
1137 icb
->itag_ftype
= FTYPE_DIRECTORY
;
1140 icb
->itag_ftype
= FTYPE_BLOCK_DEV
;
1143 icb
->itag_ftype
= FTYPE_CHAR_DEV
;
1146 icb
->itag_ftype
= FTYPE_SYMLINK
;
1149 icb
->itag_ftype
= FTYPE_FIFO
;
1152 icb
->itag_ftype
= FTYPE_C_ISSOCK
;
1158 icb
->itag_lb_loc
= 0;
1159 icb
->itag_lb_prn
= 0;
1160 flags
= ICB_FLAG_ONE_AD
;
1161 if ((pip
->i_char
& ISGID
) && (vap
->va_type
== VDIR
)) {
1164 if ((ichar
& ISGID
) &&
1165 secpolicy_vnode_setids_setgids(cr
,
1166 (gid_t
)SWAP_32(fe
->fe_gid
)) != 0) {
1170 if (ichar
& ISUID
) {
1171 flags
|= ICB_FLAG_SETUID
;
1173 if (ichar
& ISGID
) {
1174 flags
|= ICB_FLAG_SETGID
;
1176 if (ichar
& ISVTX
) {
1177 flags
|= ICB_FLAG_STICKY
;
1179 icb
->itag_flags
= SWAP_16(flags
);
1180 ud_make_tag(udf_vfsp
, &fe
->fe_tag
, UD_FILE_ENTRY
, blkno
,
1181 offsetof(struct file_entry
, fe_spec
) +
1182 SWAP_32(fe
->fe_len_ear
) + SWAP_32(fe
->fe_len_adesc
));
1186 mutex_enter(&udf_vfsp
->udf_lock
);
1187 if (vap
->va_type
== VDIR
) {
1188 udf_vfsp
->udf_ndirs
++;
1190 udf_vfsp
->udf_nfiles
++;
1192 mutex_exit(&udf_vfsp
->udf_lock
);
1196 struct ud_inode
*ip
;
1198 if ((ip
= ud_search_icache(pip
->i_vfs
, prn
, blkno
)) != NULL
) {
1199 cmn_err(CE_NOTE
, "duplicate %p %x\n",
1200 (void *)ip
, (uint32_t)ip
->i_icb_lbano
);
1205 if ((err
= ud_iget(pip
->i_vfs
, prn
, blkno
, ipp
, bp
, cr
)) != 0) {
1207 ud_free_space(pip
->i_vfs
, prn
, blkno
, size
);
1214 cmn_err(CE_NOTE
, "%s: out of inodes\n", pip
->i_udf
->udf_volid
);
1220 ud_ifree(struct ud_inode
*ip
, vtype_t type
)
1222 struct udf_vfs
*udf_vfsp
;
1225 ud_printf("ud_ifree\n");
1227 if (ip
->i_vfs
== NULL
) {
1231 udf_vfsp
= (struct udf_vfs
*)ip
->i_vfs
->vfs_data
;
1232 bp
= ud_bread(ip
->i_dev
, ip
->i_icb_lbano
<<
1233 udf_vfsp
->udf_l2d_shift
, udf_vfsp
->udf_lbsize
);
1234 if (bp
->b_flags
& B_ERROR
) {
1236 * Error get rid of bp
1241 * Just trash the inode
1243 bzero(bp
->b_un
.b_addr
, 0x10);
1246 ud_free_space(ip
->i_vfs
, ip
->i_icb_prn
, ip
->i_icb_block
, 1);
1247 mutex_enter(&udf_vfsp
->udf_lock
);
1249 if (udf_vfsp
->udf_ndirs
> 1) {
1250 udf_vfsp
->udf_ndirs
--;
1253 if (udf_vfsp
->udf_nfiles
> 0) {
1254 udf_vfsp
->udf_nfiles
--;
1257 mutex_exit(&udf_vfsp
->udf_lock
);
1262 * Free storage space associated with the specified inode. The portion
1263 * to be freed is specified by lp->l_start and lp->l_len (already
1264 * normalized to a "whence" of 0).
1266 * This is an experimental facility whose continued existence is not
1267 * guaranteed. Currently, we only support the special case
1268 * of l_len == 0, meaning free to end of file.
1270 * Blocks are freed in reverse order. This FILO algorithm will tend to
1271 * maintain a contiguous free list much longer than FIFO.
1272 * See also ufs_itrunc() in ufs_inode.c.
1274 * Bug: unused bytes in the last retained block are not cleared.
1275 * This may result in a "hole" in the file that does not read as zeroes.
1278 ud_freesp(struct vnode
*vp
,
1280 int32_t flag
, struct cred
*cr
)
1283 struct ud_inode
*ip
= VTOI(vp
);
1286 ASSERT(vp
->v_type
== VREG
);
1287 ASSERT(lp
->l_start
>= (offset_t
)0); /* checked by convoff */
1289 ud_printf("udf_freesp\n");
1291 if (lp
->l_len
!= 0) {
1295 rw_enter(&ip
->i_contents
, RW_READER
);
1296 if (ip
->i_size
== (uoff_t
)lp
->l_start
) {
1297 rw_exit(&ip
->i_contents
);
1302 * Check if there is any active mandatory lock on the
1303 * range that will be truncated/expanded.
1305 if (MANDLOCK(vp
, ip
->i_char
)) {
1306 offset_t save_start
;
1308 save_start
= lp
->l_start
;
1310 if (ip
->i_size
< lp
->l_start
) {
1312 * "Truncate up" case: need to make sure there
1313 * is no lock beyond current end-of-file. To
1314 * do so, we need to set l_start to the size
1315 * of the file temporarily.
1317 lp
->l_start
= ip
->i_size
;
1319 lp
->l_type
= F_WRLCK
;
1321 lp
->l_pid
= ttoproc(curthread
)->p_pid
;
1322 i
= (flag
& (FNDELAY
|FNONBLOCK
)) ? 0 : SLPFLCK
;
1323 rw_exit(&ip
->i_contents
);
1324 if ((i
= reclock(vp
, lp
, i
, 0, lp
->l_start
, NULL
)) != 0 ||
1325 lp
->l_type
!= F_UNLCK
) {
1326 return (i
? i
: EAGAIN
);
1328 rw_enter(&ip
->i_contents
, RW_READER
);
1330 lp
->l_start
= save_start
;
1333 * Make sure a write isn't in progress (allocating blocks)
1334 * by acquiring i_rwlock (we promised ufs_bmap we wouldn't
1335 * truncate while it was allocating blocks).
1336 * Grab the locks in the right order.
1338 rw_exit(&ip
->i_contents
);
1339 rw_enter(&ip
->i_rwlock
, RW_WRITER
);
1340 rw_enter(&ip
->i_contents
, RW_WRITER
);
1341 error
= ud_itrunc(ip
, lp
->l_start
, 0, cr
);
1342 rw_exit(&ip
->i_contents
);
1343 rw_exit(&ip
->i_rwlock
);
1350 * Cache is implemented by
1351 * allocating a cluster of blocks
1354 ud_alloc_from_cache(struct udf_vfs
*udf_vfsp
,
1355 struct ud_part
*part
, uint32_t *blkno
)
1358 int32_t error
, index
, free
= 0;
1360 ud_printf("ud_alloc_from_cache\n");
1364 mutex_enter(&udf_vfsp
->udf_lock
);
1365 if (part
->udp_cache_count
== 0) {
1366 mutex_exit(&udf_vfsp
->udf_lock
);
1367 /* allocate new cluster */
1368 if ((error
= ud_alloc_space(udf_vfsp
->udf_vfs
,
1369 part
->udp_number
, 0, CLSTR_SIZE
, &bno
, &sz
, 1, 0)) != 0) {
1375 mutex_enter(&udf_vfsp
->udf_lock
);
1376 if (part
->udp_cache_count
== 0) {
1377 for (index
= 0; index
< sz
; index
++, bno
++) {
1378 part
->udp_cache
[index
] = bno
;
1380 part
->udp_cache_count
= sz
;
1385 part
->udp_cache_count
--;
1386 *blkno
= part
->udp_cache
[part
->udp_cache_count
];
1387 mutex_exit(&udf_vfsp
->udf_lock
);
1389 ud_free_space(udf_vfsp
->udf_vfs
, part
->udp_number
, bno
, sz
);
1395 * Will be called from unmount
1398 ud_release_cache(struct udf_vfs
*udf_vfsp
)
1400 int32_t i
, error
= 0;
1401 struct ud_part
*part
;
1402 uint32_t start
, nblks
;
1404 ud_printf("ud_release_cache\n");
1406 mutex_enter(&udf_vfsp
->udf_lock
);
1407 part
= udf_vfsp
->udf_parts
;
1408 for (i
= 0; i
< udf_vfsp
->udf_npart
; i
++, part
++) {
1409 if (part
->udp_cache_count
) {
1410 nblks
= part
->udp_cache_count
;
1411 start
= part
->udp_cache
[0];
1412 part
->udp_cache_count
= 0;
1413 mutex_exit(&udf_vfsp
->udf_lock
);
1414 ud_free_space(udf_vfsp
->udf_vfs
,
1415 part
->udp_number
, start
, nblks
);
1416 mutex_enter(&udf_vfsp
->udf_lock
);
1419 mutex_exit(&udf_vfsp
->udf_lock
);