/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/errno.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>

#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>

#include <sys/fs_subr.h>

#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>
int32_t ud_break_create_new_icb(struct ud_inode *, int32_t, uint32_t);
int32_t ud_bump_ext_count(struct ud_inode *, int32_t);
void ud_remove_ext_at_index(struct ud_inode *, int32_t);
int32_t ud_last_alloc_ext(struct ud_inode *, uint64_t, uint32_t, int32_t);
int32_t ud_create_ext(struct ud_inode *, int32_t, uint32_t,
	int32_t, uint64_t, uint64_t *);
int32_t ud_zero_it(struct ud_inode *, uint32_t, uint32_t);
#define	ALLOC_SPACE	0x01
#define	NEW_EXT		0x02
int32_t
ud_bmap_has_holes(struct ud_inode *ip)
{
	int32_t i, error = 0;
	struct icb_ext *iext;

	ud_printf("ud_bmap_has_holes\n");

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	/* ICB_FLAG_ONE_AD is always contiguous */
	if (ip->i_desc_type != ICB_FLAG_ONE_AD) {
		if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
			for (i = 0; i < ip->i_ext_used; i++) {
				iext = &ip->i_ext[i];
				if (iext->ib_flags == IB_UN_RE_AL) {
					error = 1;
					break;
				}
			}
		}
	}

	return (error);
}
int32_t
ud_bmap_read(struct ud_inode *ip, uoff_t off, daddr_t *bnp, int32_t *lenp)
{
	struct icb_ext *iext;
	daddr_t bno;
	int32_t lbmask, i, l2b, l2d, error = 0, count;
	uint32_t length, block, dummy;

	ud_printf("ud_bmap_read\n");

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	lbmask = ip->i_udf->udf_lbmask;
	l2b = ip->i_udf->udf_l2b_shift;
	l2d = ip->i_udf->udf_l2d_shift;

	if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
		for (i = 0; i < ip->i_ext_used; i++) {
			iext = &ip->i_ext[i];
			if ((iext->ib_offset <= off) &&
			    (off < (iext->ib_offset + iext->ib_count))) {
				length = ((iext->ib_offset +
				    iext->ib_count - off) +
				    lbmask) & ~lbmask;
				if (iext->ib_flags == IB_UN_RE_AL) {
					*bnp = UDF_HOLE;
					*lenp = length;
					break;
				}

				block = iext->ib_block +
				    ((off - iext->ib_offset) >> l2b);
				count = length >> l2b;

				bno = ud_xlate_to_daddr(ip->i_udf,
				    iext->ib_prn, block, count, &dummy);

				ASSERT(dummy != 0);
				ASSERT(dummy <= count);
				*bnp = bno << l2d;
				*lenp = dummy << l2b;
				break;
			}
		}
		if (i == ip->i_ext_used) {
			/* offset not found in any extent */
			error = EINVAL;
		}
	}

	return (error);
}
/*
 * Extent allocation in the inode:
 * when the inode is first set up we allocate
 * EXT_PER_MALLOC extents; once those are used up
 * we allocate another EXT_PER_MALLOC, copy the
 * old extents over and start using the new array.
 */
#define	BASE(count)	((count) & ~lbmask)
#define	CEIL(count)	(((count) + lbmask) & ~lbmask)

#define	PBASE(count)	((count) & PAGEMASK)
#define	PCEIL(count)	(((count) + PAGEOFFSET) & PAGEMASK)
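
/*
 * Worked example for the rounding macros above (values assumed for
 * illustration, not taken from the original source): with a 2K logical
 * block (lbmask == 0x7FF) and 4K MMU pages (PAGEOFFSET == 0xFFF), an
 * offset of 0x1234 rounds as
 *
 *	BASE(0x1234)	== 0x1000	down to a logical block boundary
 *	CEIL(0x1234)	== 0x1800	up to a logical block boundary
 *	PBASE(0x1234)	== 0x1000	down to a page boundary
 *	PCEIL(0x1234)	== 0x2000	up to a page boundary
 *
 * On-disk allocation below works in logical blocks (BASE/CEIL) while
 * holes are only created on page boundaries (PBASE/PCEIL).
 */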

int32_t
ud_bmap_write(struct ud_inode *ip,
	uoff_t off, int32_t size, int32_t alloc_only, struct cred *cr)
{
	int32_t error = 0, i, isdir, issync;
	struct udf_vfs *udf_vfsp;
	struct icb_ext *iext, *pext;
	uint32_t blkno, sz;
	uoff_t isize;
	uint32_t acount, prox;
	int32_t blkcount, next;
	int32_t lbmask, l2b;
	uint64_t end_req, end_ext, mext_sz, icb_offset, count;
	int32_t dtype_changed = 0, memory_allocated = 0;
	struct fbuf *fbp = NULL;

	ud_printf("ud_bmap_write\n");

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	udf_vfsp = ip->i_udf;
	lbmask = udf_vfsp->udf_lbmask;
	l2b = udf_vfsp->udf_l2b_shift;
	mext_sz = (1 << MEXT_BITS) - PAGESIZE;
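
	/*
	 * Note (added for clarity; MEXT_BITS == 30 assumed, as defined
	 * in udf_inode.h): a UDF extent length field holds at most
	 * 2^30 - 1 bytes, so mext_sz caps every extent we create at
	 * (1 << 30) - PAGESIZE == 0x3FFFF000 bytes, which keeps the
	 * maximum-sized extent page aligned.
	 */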

	if (lblkno(udf_vfsp, off) < 0) {
		return (EFBIG);
	}

	issync = ((ip->i_flag & ISYNC) != 0);

	isdir = (ip->i_type == VDIR);
	if (isdir || issync) {
		alloc_only = 0;		/* make sure */
	}

	end_req = BASE(off) + size;
	if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
		if (end_req < ip->i_max_emb) {
			return (0);
		}

		if (ip->i_size != 0) {
			error = fbread(ITOV(ip), 0, ip->i_size,
			    S_OTHER, &fbp);
			if (error != 0) {
				return (error);
			}
		}

		/*
		 * Change the desc_type
		 */
		ip->i_desc_type = ICB_FLAG_SHORT_AD;
		dtype_changed = 1;

one_ad_no_i_ext:
		ASSERT(ip->i_ext == NULL);
		ASSERT(ip->i_astrat == STRAT_TYPE4);

		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
		ip->i_cur_max_ext--;
		if (end_req > mext_sz) {
			next = end_req / mext_sz;
		} else {
			next = 1;
		}
		ip->i_ext_count =
		    ((next / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
		iext = ip->i_ext = kmem_zalloc(
		    ip->i_ext_count * sizeof (struct icb_ext), KM_SLEEP);
		memory_allocated = 1;

		/* There will be at least EXT_PER_MALLOC icb_exts allocated */

one_ad_i_ext:
		icb_offset = 0;
		count = end_req;

		/* Can we create a HOLE */
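
		/*
		 * Illustrative example (values assumed, not from the
		 * original source): with 4K pages, i_size == 0x800 and
		 * off == 0x5000 give PCEIL(i_size) == 0x1000 and
		 * PBASE(off) == 0x5000; the 0x4000 gap is >= PAGESIZE,
		 * so one page is allocated for the old embedded data,
		 * 0x1000-0x5000 becomes a hole, and space is allocated
		 * from 0x5000 up to end_req.
		 */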

		if ((PCEIL(ip->i_size) < PBASE(off)) &&
		    ((PBASE(off) - PCEIL(ip->i_size)) >= PAGESIZE)) {

			if (ip->i_size != 0) {

				/*
				 * Allocate one block for the
				 * old data (cannot be more than one page)
				 */

				count = PAGESIZE;
				if (error = ud_create_ext(ip, ip->i_ext_used,
				    ALLOC_SPACE | NEW_EXT, alloc_only,
				    icb_offset, &count)) {
					goto embedded_error;
				}
				icb_offset = PAGESIZE;
			}

			/*
			 * Allocate a hole from PCEIL(ip->i_size) to PBASE(off)
			 */

			count = PBASE(off) - PCEIL(ip->i_size);
			(void) ud_create_ext(ip, ip->i_ext_used, NEW_EXT,
			    alloc_only, icb_offset, &count);
			icb_offset = PBASE(off);

			/*
			 * Allocate the rest of the space PBASE(off) to end_req
			 */
			count = end_req - PBASE(off);
		} else {
			/*
			 * If no hole can be created then allocate
			 * space till the end of the request
			 */
			count = end_req;
		}

		if (error = ud_create_ext(ip, ip->i_ext_used,
		    ALLOC_SPACE | NEW_EXT,
		    alloc_only, icb_offset, &count)) {
embedded_error:
			/*
			 * Something went wrong, most probably the
			 * file system is full. We know the file
			 * came in as an embedded file, so undo
			 * whatever we did in this block of code.
			 */
			if (dtype_changed) {
				ip->i_desc_type = ICB_FLAG_ONE_AD;
			}
			for (i = 0; i < ip->i_ext_used; i++) {
				iext = &ip->i_ext[i];
				if (iext->ib_flags != IB_UN_RE_AL) {
					ud_free_space(ip->i_udf->udf_vfs,
					    iext->ib_prn, iext->ib_block,
					    (iext->ib_count + lbmask) >>
					    l2b);
				}
			}
			if (memory_allocated) {
				kmem_free(ip->i_ext, ip->i_ext_count *
				    sizeof (struct icb_ext));
				ip->i_ext = NULL;
				ip->i_ext_count = ip->i_ext_used = 0;
			}
		}

		if (fbp != NULL) {
			fbrelse(fbp, S_WRITE);
		}

		return (error);
	} else {

		/*
		 * Type 4 directories being created
		 */
		if (ip->i_ext == NULL) {
			goto one_ad_no_i_ext;
		}

		/*
		 * Read all the icbs into memory
		 */
		if (ud_read_icb_till_off(ip, ip->i_size) != 0) {
			return (EINVAL);
		}

		isize = CEIL(ip->i_size);

		if (end_req > isize) {

			/*
			 * The new file size is greater
			 * than the old size
			 */

			if (ip->i_ext == NULL) {
				goto one_ad_no_i_ext;
			} else if (ip->i_ext_used == 0) {
				goto one_ad_i_ext;
			}

			error = ud_last_alloc_ext(ip, off, size, alloc_only);

			return (error);
		} else {

			/*
			 * The file is growing, but the new size is
			 * still less than
			 * iext->ib_offset + CEIL(iext->ib_count)
			 */

			iext = &ip->i_ext[ip->i_ext_used - 1];

			if (end_req > (iext->ib_offset + iext->ib_count)) {

				iext->ib_count = end_req - iext->ib_offset;

				if (iext->ib_flags != IB_UN_RE_AL) {
					return (0);
				}
			}
		}
	}
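
	/*
	 * Illustrative walk of the loop below (values assumed, not from
	 * the original source): with extents [0: allocated 0x0-0x4000]
	 * and [1: hole 0x4000-0x10000], a write covering 0x6000-0x8000
	 * first splits extent 1 so that 0x4000-0x6000 remains a smaller
	 * hole, then splits again at PCEIL(end_req) so that
	 * 0x8000-0x10000 remains a hole, and finally allocates blocks
	 * for 0x6000-0x8000 only.
	 */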

	/* By this point the end of the last extent is >= BASE(off) + size */

	ASSERT(ip->i_ext);

	/*
	 * Figure out the icb_ext that has offset "off"
	 */
	for (i = 0; i < ip->i_ext_used; i++) {
		iext = &ip->i_ext[i];
		if ((iext->ib_offset <= off) &&
		    ((iext->ib_offset + iext->ib_count) > off)) {
			break;
		}
	}

	/*
	 * iext will have offset "off"
	 */

	do {
		iext = &ip->i_ext[i];

		if ((iext->ib_flags & IB_UN_RE_AL) == 0) {

			/*
			 * Already allocated, do nothing
			 */

			i++;
		} else {

			/*
			 * We are in a hole:
			 * allocate the required space
			 * while trying to create smaller holes
			 */

			if ((PBASE(off) > PBASE(iext->ib_offset)) &&
			    ((PBASE(off) - PBASE(iext->ib_offset)) >=
			    PAGESIZE)) {

				/*
				 * Allocate space from the beginning of
				 * the old hole to the beginning of the
				 * new hole. We want all holes created
				 * by us to be MMUPAGE aligned.
				 */

				if (PBASE(iext->ib_offset) !=
				    BASE(iext->ib_offset)) {
					if ((error = ud_break_create_new_icb(
					    ip, i, BASE(iext->ib_offset) -
					    PBASE(iext->ib_offset))) != 0) {
						return (error);
					}
					goto alloc_cur_ext;
				}

				/*
				 * Create the new hole
				 */

				if ((error = ud_break_create_new_icb(ip, i,
				    PBASE(off) - iext->ib_offset)) != 0) {
					return (error);
				}
				iext = &ip->i_ext[i];
				i++;
				continue;
			}

alloc_cur_ext:
			iext = &ip->i_ext[i];
			end_ext = iext->ib_offset + iext->ib_count;

			if ((PBASE(end_ext) > PCEIL(end_req)) &&
			    ((PBASE(end_ext) - PCEIL(end_req)) >=
			    PAGESIZE)) {
				/*
				 * We can create a hole
				 * from PCEIL(end_req) to BASE(end_ext)
				 */
				if ((error = ud_break_create_new_icb(ip, i,
				    PCEIL(end_req) - iext->ib_offset)) != 0) {
					return (error);
				}
			}

			/*
			 * Allocate the current extent
			 */

			/*
			 * If the previous extent
			 * is allocated then try to allocate
			 * adjacent to the previous extent
			 */
			prox = 0;
			if (i != 0) {
				pext = &ip->i_ext[i - 1];
				if (pext->ib_flags != IB_UN_RE_AL) {
					prox = pext->ib_block +
					    (CEIL(pext->ib_count) >> l2b);
				}
			}

			iext = &ip->i_ext[i];
			blkcount = CEIL(iext->ib_count) >> l2b;

			if ((error = ud_alloc_space(ip->i_vfs,
			    ip->i_icb_prn, prox, blkcount,
			    &blkno, &sz, 1, 0)) != 0) {
				return (error);
			}

			acount = sz << l2b;

			if (alloc_only == 0) {
				error = ud_zero_it(ip, blkno, sz);
			}

			if ((prox == blkno) &&
			    ((pext->ib_count + acount) < mext_sz)) {

				/*
				 * We were able to allocate adjacent to
				 * the previous extent. Increment the
				 * previous extent's count as long as
				 * the size of the extent does not
				 * exceed the maximum extent size.
				 */

				pext = &ip->i_ext[i - 1];
				pext->ib_count += acount;

				if (sz == blkcount) {
					/*
					 * and get rid of the current
					 * extent since we have
					 * allocated all of its size
					 * and incremented the
					 * previous extent's count
					 */
					ud_remove_ext_at_index(ip, i);
				} else {
					/*
					 * reduce the count of the
					 * current extent by the amount
					 * allocated in the last extent
					 */
					ASSERT(acount < iext->ib_count);
					iext->ib_count -= acount;
					iext->ib_offset += acount;
				}
			} else {
				if (sz < blkcount) {
					if ((error = ud_break_create_new_icb(
					    ip, i, sz << l2b)) != 0) {
						return (error);
					}
				}
				iext = &ip->i_ext[i];
				count -= CEIL(iext->ib_count);
				iext->ib_prn = ip->i_icb_prn;
				iext->ib_block = blkno;
				iext->ib_flags &= ~IB_UN_RE_AL;
				/*
				 * iext->ib_flags |= IB_UN_REC;
				 */
				i++;
			}
		}
	} while ((iext->ib_offset + iext->ib_count) < end_req);

	return (error);
}
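
/*
 * A note on the allocation-descriptor decoding below (summarizing the
 * ECMA-167/UDF extent length encoding): the upper two bits of the
 * 32-bit extent length carry the extent type (masked by IB_MASK) and
 * the lower 30 bits carry the byte count. For example, a raw length of
 * 0xC0000800 decodes to type 0x3 (IB_CON, a continuation extent of
 * allocation descriptors) with a length of 0x800 bytes, while type 0x2
 * marks an unrecorded, unallocated extent (IB_UN_RE_AL).
 */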

/*
 * increase the i_con/i_ext arrays and set the new elements
 * using long or short allocation descriptors
 */
void
ud_common_ad(struct ud_inode *ip, struct buf *bp)
{
	int32_t ndesc, count, lbmask;
	uint32_t length;
	struct alloc_ext_desc *aed;
	struct icb_ext *iext, *con;
	uoff_t offset;
	struct long_ad *lad;
	struct short_ad *sad;
	caddr_t addr;

	addr = bp->b_un.b_addr + sizeof (struct alloc_ext_desc);
	aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
	length = SWAP_32(aed->aed_len_aed);
	if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
		lad = (struct long_ad *)addr;
		sad = NULL;
		ndesc = length / sizeof (*lad);
	} else if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
		sad = (struct short_ad *)addr;
		lad = NULL;
		ndesc = length / sizeof (*sad);
	} else {
		return;
	}

	/*
	 * realloc the i_ext array
	 */
	count = (((ip->i_ext_used + ndesc) / EXT_PER_MALLOC) + 1) *
	    EXT_PER_MALLOC;
	addr = kmem_zalloc(count * sizeof (struct icb_ext), KM_SLEEP);
	bcopy(ip->i_ext, addr, ip->i_ext_used * sizeof (struct icb_ext));
	kmem_free(ip->i_ext, ip->i_ext_count * sizeof (struct icb_ext));
	ip->i_ext = (struct icb_ext *)addr;
	ip->i_ext_count = count;

	/*
	 * decode the descriptors into new i_ext entries
	 */
	lbmask = ip->i_udf->udf_lbmask;
	iext = &ip->i_ext[ip->i_ext_used - 1];
	offset = iext->ib_offset + iext->ib_count;
	while (ndesc--) {
		if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
			length = SWAP_32(lad->lad_ext_len);
		} else {
			length = SWAP_32(sad->sad_ext_len);
		}

		if ((length & 0x3FFFFFFF) == 0) {
			break;
		} else if (((length >> 30) & IB_MASK) == IB_CON) {
			if (ip->i_con_used == ip->i_con_count) {
				struct icb_ext *old;
				int32_t old_count;

				old = ip->i_con;
				old_count = ip->i_con_count *
				    sizeof (struct icb_ext);
				ip->i_con_count += EXT_PER_MALLOC;
				ip->i_con = kmem_zalloc(ip->i_con_count *
				    sizeof (struct icb_ext), KM_SLEEP);
				if (old != NULL) {
					bcopy(old, ip->i_con, old_count);
					kmem_free(old, old_count);
				}
			}
			con = &ip->i_con[ip->i_con_used];
			if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
				con->ib_prn = SWAP_16(lad->lad_ext_prn);
				con->ib_block = SWAP_32(lad->lad_ext_loc);
			} else {
				con->ib_prn = ip->i_icb_prn;
				con->ib_block = SWAP_32(sad->sad_ext_loc);
			}
			con->ib_count = length & 0x3FFFFFFF;
			con->ib_flags = (length >> 30) & IB_MASK;
			ip->i_con_used++;
			break;
		}

		iext = &ip->i_ext[ip->i_ext_used];
		if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
			iext->ib_prn = SWAP_16(lad->lad_ext_prn);
			iext->ib_block = SWAP_32(lad->lad_ext_loc);
			lad++;
		} else {
			iext->ib_prn = ip->i_icb_prn;
			iext->ib_block = SWAP_32(sad->sad_ext_loc);
			sad++;
		}
		iext->ib_count = length & 0x3FFFFFFF;
		iext->ib_offset = offset;
		iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
		iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
		offset += (iext->ib_count + lbmask) & (~lbmask);
		iext->ib_flags = (length >> 30) & IB_MASK;
		ip->i_ext_used++;
	}
}

int32_t
ud_read_next_cont(struct ud_inode *ip)
{
	uint32_t dummy, error = 0;
	struct alloc_ext_desc *aed;
	struct icb_ext *cont;
	struct buf *bp;
	daddr_t bno;

	cont = &ip->i_con[ip->i_con_read];
	ASSERT(cont->ib_count > 0);

	bno = ud_xlate_to_daddr(ip->i_udf, cont->ib_prn, cont->ib_block,
	    1, &dummy);
	bp = ud_bread(ip->i_dev, bno << ip->i_udf->udf_l2d_shift,
	    cont->ib_count);
	if (bp->b_flags & B_ERROR) {
		error = EIO;
	} else {
		aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&aed->aed_tag, UD_ALLOC_EXT_DESC,
		    cont->ib_block, 1, cont->ib_count)) {
			error = EINVAL;
		}
	}

	if (error == 0) {
		ud_common_ad(ip, bp);
		ip->i_con_read++;
	}

	brelse(bp);
	return (error);
}

int32_t
ud_read_icb_till_off(struct ud_inode *ip, uoff_t offset)
{
	int32_t error = 0;
	struct icb_ext *iext;

	ud_printf("ud_read_icb_till_off\n");

	if (ip->i_desc_type == ICB_FLAG_ONE_AD)
		return (0);
	else if ((ip->i_astrat != STRAT_TYPE4) &&
	    (ip->i_astrat != STRAT_TYPE4096))
		return (EINVAL);
	else if (ip->i_ext_used == 0)
		return ((ip->i_size == 0) ? 0 : EINVAL);

	/*
	 * the only supported allocation strategies are
	 * STRAT_TYPE4 and STRAT_TYPE4096
	 */

	mutex_enter(&ip->i_con_lock);
	iext = &ip->i_ext[ip->i_ext_used - 1];
	while ((iext->ib_offset + iext->ib_count) < offset) {
		if (ip->i_con_used == ip->i_con_read) {
			error = EINVAL;
			break;
		}
		if (error = ud_read_next_cont(ip))
			break;
		iext = &ip->i_ext[ip->i_ext_used - 1];
	}
	mutex_exit(&ip->i_con_lock);

	return (error);
}

/*
 * The assumption is that off is beyond ip->i_size
 * and that at least one extent is in use
 */
int32_t
ud_last_alloc_ext(struct ud_inode *ip, uint64_t off,
	uint32_t size, int32_t alloc_only)
{
	struct icb_ext *iext;
	struct udf_vfs *udf_vfsp;
	int32_t lbsize, lbmask;
	uint64_t end_req, end_count, icb_offset;
	uint64_t count;
	int32_t error = 0;

	udf_vfsp = ip->i_udf;
	lbsize = udf_vfsp->udf_lbsize;
	lbmask = udf_vfsp->udf_lbmask;

	end_req = BASE(off) + size;

	/*
	 * If we are here it means the file
	 * is growing beyond the end of the
	 * current block, so round up the
	 * last extent
	 */

	iext = &ip->i_ext[ip->i_ext_used - 1];
	iext->ib_count = CEIL(iext->ib_count);

	/*
	 * Figure out if we can create
	 * a hole here
	 */

	end_count = iext->ib_offset + iext->ib_count;

	if ((PCEIL(end_count) < PBASE(off)) &&
	    ((PBASE(off) - PCEIL(end_count)) >= PAGESIZE)) {

		count = PCEIL(end_count) - CEIL(end_count);
		if (count >= lbsize) {

			/*
			 * There is space between the end of the
			 * last offset and the beginning of the
			 * hole to be created;
			 * allocate blocks for it
			 */

			iext = &ip->i_ext[ip->i_ext_used - 1];
			icb_offset = iext->ib_offset + CEIL(iext->ib_count);

			if (iext->ib_flags == IB_UN_RE_AL) {

				/*
				 * The previous extent is an unallocated
				 * extent; create a new allocated
				 * extent
				 */

				error = ud_create_ext(ip, ip->i_ext_used,
				    ALLOC_SPACE | NEW_EXT,
				    alloc_only, icb_offset, &count);
			} else {

				/*
				 * The last extent is allocated;
				 * try to allocate adjacent to the
				 * last extent
				 */

				error = ud_create_ext(ip, ip->i_ext_used - 1,
				    ALLOC_SPACE, alloc_only,
				    icb_offset, &count);
			}

			if (error != 0) {
				return (error);
			}
		}

		iext = &ip->i_ext[ip->i_ext_used - 1];
		end_count = iext->ib_offset + iext->ib_count;
		count = PBASE(off) - PCEIL(end_count);
		icb_offset = PCEIL(end_count);

		if (iext->ib_flags == IB_UN_RE_AL) {

			/*
			 * The last extent is unallocated;
			 * just bump the extent count
			 */

			(void) ud_create_ext(ip, ip->i_ext_used - 1,
			    0, alloc_only, icb_offset, &count);
		} else {

			/*
			 * The last extent is allocated;
			 * round up the size of the extent to
			 * lbsize and allocate a new unallocated extent
			 */

			iext->ib_count = CEIL(iext->ib_count);
			(void) ud_create_ext(ip, ip->i_ext_used,
			    NEW_EXT, alloc_only, icb_offset, &count);
		}

		icb_offset = PBASE(off);
	} else {

		/*
		 * We cannot create any hole between
		 * the last extent and off, so
		 * round up the count in the last extent
		 */

		iext = &ip->i_ext[ip->i_ext_used - 1];
		iext->ib_count = CEIL(iext->ib_count);
	}

	iext = &ip->i_ext[ip->i_ext_used - 1];
	count = end_req - (iext->ib_offset + iext->ib_count);
	icb_offset = iext->ib_offset + CEIL(iext->ib_count);

	if (iext->ib_flags == IB_UN_RE_AL) {

		/*
		 * The last extent was an unallocated extent;
		 * create a new extent
		 */

		error = ud_create_ext(ip, ip->i_ext_used,
		    ALLOC_SPACE | NEW_EXT, alloc_only, icb_offset, &count);
	} else {

		/*
		 * The last extent was an allocated extent;
		 * try to allocate adjacent to the old blocks
		 */

		error = ud_create_ext(ip, ip->i_ext_used - 1,
		    ALLOC_SPACE, alloc_only, icb_offset, &count);
	}

	return (error);
}

/*
 * Break up the icb_ext at index
 * into two icb_exts:
 * one at index with ib_count "count" and
 * the other at index+1 with ib_count = old_ib_count - count
 */
int32_t
ud_break_create_new_icb(struct ud_inode *ip,
	int32_t index, uint32_t count)
{
	int32_t i, error;
	struct icb_ext *iext, *next;

	ud_printf("ud_break_create_new_icb\n");

	iext = &ip->i_ext[index];

	ASSERT(count < iext->ib_count);

	if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
		return (error);
	}

	for (i = ip->i_ext_used; i > index; i--) {
		ip->i_ext[i] = ip->i_ext[i - 1];
	}

	next = &ip->i_ext[index + 1];
	iext = &ip->i_ext[index];

	iext->ib_count = count;
	next->ib_count -= count;
	next->ib_offset = iext->ib_offset + iext->ib_count;
	if (iext->ib_flags != IB_UN_RE_AL) {
		next->ib_block = iext->ib_block +
		    (iext->ib_count >> ip->i_udf->udf_l2b_shift);
	}
	ip->i_ext_used++;
	return (0);
}
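
/*
 * Worked example (values assumed for illustration): with 2K logical
 * blocks (udf_l2b_shift == 11), splitting an allocated extent
 * { ib_offset 0x0, ib_count 0x3000, ib_block 100 } with count 0x1000
 * leaves { 0x0, 0x1000, block 100 } at index and creates
 * { 0x1000, 0x2000, block 102 } at index+1, since 0x1000 >> 11 == 2.
 */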

void
ud_remove_ext_at_index(struct ud_inode *ip, int32_t index)
{
	int32_t i;

	ASSERT(index <= ip->i_ext_used);

	for (i = index; i < ip->i_ext_used; i++) {
		if ((i + 1) < ip->i_ext_count) {
			ip->i_ext[i] = ip->i_ext[i + 1];
		} else {
			bzero(&ip->i_ext[i], sizeof (struct icb_ext));
		}
	}
	ip->i_ext_used--;
}

int32_t
ud_bump_ext_count(struct ud_inode *ip, int32_t sleep_flag)
{
	int32_t error = 0;
	struct icb_ext *iext;
	uint32_t old_count, elen;

	ASSERT(ip);
	ASSERT(sleep_flag == KM_SLEEP);

	ud_printf("ud_bump_ext_count\n");

	if (ip->i_ext_used >= ip->i_ext_count) {

		old_count = sizeof (struct icb_ext) * ip->i_ext_count;
		ip->i_ext_count += EXT_PER_MALLOC;
		iext = kmem_zalloc(sizeof (struct icb_ext) *
		    ip->i_ext_count, sleep_flag);
		bcopy(ip->i_ext, iext, old_count);
		kmem_free(ip->i_ext, old_count);
		ip->i_ext = iext;
	}

	if (ip->i_ext_used >= ip->i_cur_max_ext) {
		struct icb_ext *icon;
		uint32_t prox, blkno, sz;
		int32_t lbmask, l2b;

		lbmask = ip->i_udf->udf_lbmask;
		l2b = ip->i_udf->udf_l2b_shift;

		if ((error = ud_read_icb_till_off(ip, ip->i_size)) != 0) {
			return (error);
		}

		/*
		 * If there are any old cont extents,
		 * allocate the new one adjacent to the old one
		 */
		if (ip->i_con_used != 0) {
			icon = &ip->i_con[ip->i_con_used - 1];
			prox = icon->ib_block + (CEIL(icon->ib_count) >> l2b);
		} else {
			prox = 0;
		}

		if ((error = ud_alloc_space(ip->i_vfs, ip->i_icb_prn,
		    prox, 1, &blkno, &sz, 0, 0)) != 0) {
			return (error);
		}
		if (sz == 0) {
			return (ENOSPC);
		}

		sz <<= l2b;

		if (ip->i_con_used == ip->i_con_count) {
			struct icb_ext *old;
			int32_t old_count;

			old = ip->i_con;
			old_count = ip->i_con_count *
			    sizeof (struct icb_ext);
			ip->i_con_count += EXT_PER_MALLOC;
			ip->i_con = kmem_zalloc(ip->i_con_count *
			    sizeof (struct icb_ext), KM_SLEEP);
			if (old != NULL) {
				bcopy(old, ip->i_con, old_count);
				kmem_free(old, old_count);
			}
		}

		icon = &ip->i_con[ip->i_con_used++];
		icon->ib_flags = IB_CON;
		icon->ib_prn = ip->i_icb_prn;
		icon->ib_block = blkno;
		icon->ib_count = sz;
		icon->ib_offset = 0;
		icon->ib_marker1 = (uint32_t)0xAAAAAAAA;
		icon->ib_marker2 = (uint32_t)0xBBBBBBBB;

		/*
		 * Bump i_cur_max_ext according to
		 * the space allocated
		 */
		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
			elen = sizeof (struct short_ad);
		} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
			elen = sizeof (struct long_ad);
		} else {
			return (EINVAL);
		}
		sz = sz - (sizeof (struct alloc_ext_desc) + elen);
		ip->i_cur_max_ext += sz / elen;
	}

	return (error);
}
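
/*
 * Worked example for the i_cur_max_ext bump above (sizes assumed for
 * illustration): with one freshly allocated 2K continuation block, a
 * 24-byte alloc_ext_desc header and 8-byte short_ads leave
 * (2048 - 24 - 8) / 8 == 252 more allocation descriptors, one extra
 * slot being held back for the pointer to the next continuation.
 */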

int32_t
ud_create_ext(struct ud_inode *ip, int32_t index, uint32_t flags,
	int32_t alloc_only, uint64_t offset, uint64_t *count)
{
	struct icb_ext *iext, *pext;
	struct udf_vfs *udf_vfsp;
	int32_t error = 0, blkcount, acount;
	uint32_t blkno, sz, prox, mext_sz;
	int32_t lbmask, l2b;

	if (*count == 0) {
		return (0);
	}

	udf_vfsp = ip->i_udf;
	lbmask = udf_vfsp->udf_lbmask;
	l2b = udf_vfsp->udf_l2b_shift;
	mext_sz = (1 << MEXT_BITS) - PAGESIZE;

	if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
		return (error);
	}

	iext = &ip->i_ext[index];
	if (flags & ALLOC_SPACE) {
		if ((flags & NEW_EXT) ||
		    (ip->i_ext_count == 0)) {

			/*
			 * Allocate a fresh extent
			 */
			iext->ib_prn = ip->i_icb_prn;
			if (*count > mext_sz) {
				blkcount = mext_sz >> l2b;
			} else {
				blkcount = CEIL(*count) >> l2b;
			}
			if ((error = ud_alloc_space(ip->i_vfs,
			    ip->i_icb_prn, 0, blkcount,
			    &blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			if (sz == 0) {
				return (ENOSPC);
			}
			iext->ib_block = blkno;
			if ((sz << l2b) > *count) {
				iext->ib_count = *count;
				*count = 0;
			} else {
				iext->ib_count = sz << l2b;
				*count -= iext->ib_count;
			}
			iext->ib_offset = offset;
			if (ip->i_ext_used <= index)
				ip->i_ext_used++;
			if (alloc_only == 0) {
				error = ud_zero_it(ip, blkno, sz);
			}
		} else {
			/*
			 * Extend the extent at index
			 */
			if ((iext->ib_count + *count) > mext_sz) {
				blkcount = (mext_sz - iext->ib_count) >> l2b;
			} else {
				blkcount = CEIL(*count) >> l2b;
			}
			if (blkcount == 0) {
				/*
				 * The extent is already at its maximum
				 * size; spill into a new extent.
				 */
				return (ud_create_ext(ip, index + 1,
				    flags | NEW_EXT, alloc_only,
				    offset, count));
			}

			prox = iext->ib_block + (CEIL(iext->ib_count) >> l2b);
			if ((error = ud_alloc_space(ip->i_vfs,
			    ip->i_icb_prn, prox, blkcount,
			    &blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			if (sz == 0) {
				return (ENOSPC);
			}

			acount = sz << l2b;
			if (acount > *count) {
				acount = *count;
				*count = 0;
			} else {
				*count -= acount;
			}

			if (prox == blkno) {
				iext->ib_count += acount;
			} else {
				if ((error = ud_bump_ext_count(ip, KM_SLEEP))
				    != 0) {
					return (error);
				}
				pext = &ip->i_ext[index];
				iext = &ip->i_ext[index + 1];
				iext->ib_prn = ip->i_icb_prn;
				iext->ib_block = blkno;
				iext->ib_offset =
				    pext->ib_offset + pext->ib_count;
				iext->ib_count = acount;
				/*
				 * Increment the index, since we have used
				 * the extent at [index+1] above.
				 */
				index++;
			}
			if (ip->i_ext_used <= index)
				ip->i_ext_used++;

			if (alloc_only == 0) {
				error = ud_zero_it(ip, blkno, sz);
			}

			offset = iext->ib_offset + CEIL(iext->ib_count);
		}
	} else {
		if (flags & NEW_EXT) {
			iext->ib_flags = IB_UN_RE_AL;
			iext->ib_prn = 0;
			iext->ib_block = 0;
			if (*count > mext_sz) {
				iext->ib_count = mext_sz;
				*count -= iext->ib_count;
			} else {
				iext->ib_count = *count;
				*count = 0;
			}
			iext->ib_offset = offset;
			if (ip->i_ext_used <= index)
				ip->i_ext_used++;
		} else {
			ASSERT(iext->ib_flags == IB_UN_RE_AL);
			if ((iext->ib_count + *count) > mext_sz) {
				acount = mext_sz - iext->ib_count;
				iext->ib_count += acount;
				*count -= acount;
			} else {
				iext->ib_count += *count;
				*count = 0;
			}
		}
	}

	offset = iext->ib_offset + CEIL(iext->ib_count);

	iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
	iext->ib_marker2 = (uint32_t)0xBBBBBBBB;

	if (*count != 0) {
		error = ud_create_ext(ip, index + 1, flags,
		    alloc_only, offset, count);
	}

	return (error);
}
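
/*
 * Note (added for clarity): because every extent is capped at mext_sz
 * bytes, ud_create_ext handles the remaining count by calling itself
 * on the next index, so a single large request is laid down as a chain
 * of maximum-sized extents followed by one extent for the remainder.
 */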

int32_t
ud_zero_it(struct ud_inode *ip, uint32_t start_block, uint32_t block_count)
{
	struct udf_vfs *udf_vfsp;
	uint32_t bno, dummy;
	int32_t error;
	struct buf *bp;

	/*
	 * Do not use the bio routines here:
	 * the buffer could sit in the cache long enough
	 * for the space to be freed and
	 * then allocated again
	 */
	udf_vfsp = ip->i_udf;
	bno = ud_xlate_to_daddr(udf_vfsp,
	    ip->i_icb_prn, start_block, block_count, &dummy);

	dummy = block_count << udf_vfsp->udf_l2b_shift;
	bp = kmem_zalloc(biosize(), KM_SLEEP);
	sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
	sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);

	bp->b_flags = B_WRITE | B_BUSY;
	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bno << udf_vfsp->udf_l2d_shift;
	bp->b_bcount = dummy;
	bp->b_un.b_addr = kmem_zalloc(bp->b_bcount, KM_SLEEP);
	bp->b_file = ip->i_vnode;
	bp->b_offset = -1;

	(void) bdev_strategy(bp);
	if (error = biowait(bp)) {
		cmn_err(CE_WARN, "error in write\n");
	}

	kmem_free(bp->b_un.b_addr, dummy);
	sema_destroy(&bp->b_io);
	sema_destroy(&bp->b_sem);
	kmem_free((caddr_t)bp, biosize());

	return (error);
}