/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E%	SMI"
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>

#include <sys/fs_subr.h>

#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>
int32_t ud_break_create_new_icb(struct ud_inode *, int32_t, uint32_t);
int32_t ud_bump_ext_count(struct ud_inode *, int32_t);
void ud_remove_ext_at_index(struct ud_inode *, int32_t);
int32_t ud_last_alloc_ext(struct ud_inode *, uint64_t, uint32_t, int32_t);
int32_t ud_create_ext(struct ud_inode *, int32_t, uint32_t,
    int32_t, uint64_t, uint64_t *);
int32_t ud_zero_it(struct ud_inode *, uint32_t, uint32_t);

#define ALLOC_SPACE     0x01
#define NEW_EXT         0x02

#define MEXT_BITS       30
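
/*
 * A UDF allocation descriptor carries the extent length in the low 30
 * bits of its length field (the top two bits encode the extent type),
 * so a single extent can describe at most 2^MEXT_BITS - 1 bytes.  The
 * allocation code below caps extents at (1 << MEXT_BITS) - PAGESIZE so
 * that a maximal extent stays page aligned.
 */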

int32_t
ud_bmap_has_holes(struct ud_inode *ip)
{
    int32_t i, error = 0;
    struct icb_ext *iext;

    ud_printf("ud_bmap_has_holes\n");

    ASSERT(RW_LOCK_HELD(&ip->i_contents));

    /* ICB_FLAG_ONE_AD is always contiguous */
    if (ip->i_desc_type != ICB_FLAG_ONE_AD) {
        if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
            for (i = 0; i < ip->i_ext_used; i++) {
                iext = &ip->i_ext[i];
                if (iext->ib_flags == IB_UN_RE_AL) {
                    error = 1;
                    break;
                }
            }
        }
    }

    return (error);
}
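
/*
 * Map the file offset "off" to a device address for a read.  On
 * success *bnp holds the device address and *lenp the number of
 * contiguous bytes mapped from that address; *bnp is set to UDF_HOLE
 * when the offset falls inside an unallocated extent.
 */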
int32_t
ud_bmap_read(struct ud_inode *ip, uoff_t off, daddr_t *bnp, int32_t *lenp)
{
    struct icb_ext *iext;
    daddr_t bno;
    int32_t lbmask, i, l2b, l2d, error = 0, count;
    uint32_t length, block, dummy;

    ud_printf("ud_bmap_read\n");

    ASSERT(RW_LOCK_HELD(&ip->i_contents));

    lbmask = ip->i_udf->udf_lbmask;
    l2b = ip->i_udf->udf_l2b_shift;
    l2d = ip->i_udf->udf_l2d_shift;

    if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
        for (i = 0; i < ip->i_ext_used; i++) {
            iext = &ip->i_ext[i];
            if ((iext->ib_offset <= off) &&
                (off < (iext->ib_offset + iext->ib_count))) {
                length = ((iext->ib_offset +
                    iext->ib_count - off) +
                    lbmask) & ~lbmask;
                if (iext->ib_flags == IB_UN_RE_AL) {
                    *bnp = UDF_HOLE;
                    *lenp = length;
                    break;
                }

                block = iext->ib_block +
                    ((off - iext->ib_offset) >> l2b);
                count = length >> l2b;

                bno = ud_xlate_to_daddr(ip->i_udf,
                    iext->ib_prn, block, count, &dummy);
                ASSERT(dummy != 0);
                ASSERT(dummy <= count);
                *bnp = bno << l2d;
                *lenp = dummy << l2b;

                break;
            }
        }
        if (i == ip->i_ext_used) {
            error = EINVAL;
        }
    }

    return (error);
}

/*
 * Extent allocation in the inode:
 * when the inode first needs extents we allocate
 * EXT_PER_MALLOC of them, and once those are used
 * up we allocate another EXT_PER_MALLOC, copy the
 * old extents over and start using the new ones.
 */
#define BASE(count)     ((count) & ~lbmask)
#define CEIL(count)     (((count) + lbmask) & ~lbmask)

#define PBASE(count)    ((count) & PAGEMASK)
#define PCEIL(count)    (((count) + PAGEOFFSET) & PAGEMASK)
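
/*
 * BASE/CEIL round down/up to the logical block size and PBASE/PCEIL
 * to the MMU page size.  For example (assuming a 2K logical block, so
 * lbmask is 0x7FF, and 4K pages): for an offset of 5000, BASE is 4096,
 * CEIL is 6144, PBASE is 4096 and PCEIL is 8192.
 */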

/* ARGSUSED3 */
int32_t
ud_bmap_write(struct ud_inode *ip,
    uoff_t off, int32_t size, int32_t alloc_only, struct cred *cr)
{
    int32_t error = 0, i, isdir, issync;
    struct udf_vfs *udf_vfsp;
    struct icb_ext *iext, *pext;
    uint32_t blkno, sz;
    uoff_t isize;
    uint32_t acount, prox;
    int32_t blkcount, next;
    int32_t lbmask, l2b;
    uint64_t end_req, end_ext, mext_sz, icb_offset, count;
    int32_t dtype_changed = 0, memory_allocated = 0;
    struct fbuf *fbp = NULL;

    ud_printf("ud_bmap_write\n");

    ASSERT(RW_WRITE_HELD(&ip->i_contents));

    udf_vfsp = ip->i_udf;
    lbmask = udf_vfsp->udf_lbmask;
    l2b = udf_vfsp->udf_l2b_shift;
    mext_sz = (1 << MEXT_BITS) - PAGESIZE;
    if (lblkno(udf_vfsp, off) < 0) {
        return (EFBIG);
    }

    issync = ((ip->i_flag & ISYNC) != 0);

    isdir = (ip->i_type == VDIR);
    if (isdir || issync) {
        alloc_only = 0;     /* make sure */
    }

    end_req = BASE(off) + size;
    if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
        if (end_req < ip->i_max_emb) {
            goto out;
        }

        if (ip->i_size != 0) {
            error = fbread(ITOV(ip), 0, ip->i_size, S_OTHER, &fbp);
            if (error != 0) {
                goto out;
            }
        } else {
            fbp = NULL;
        }

        /*
         * Change the desc_type
         */
        ip->i_desc_type = ICB_FLAG_SHORT_AD;
        dtype_changed = 1;

one_ad_no_i_ext:
        ASSERT(ip->i_ext == NULL);
        ASSERT(ip->i_astrat == STRAT_TYPE4);

        ip->i_ext_used = 0;
        ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
        ip->i_cur_max_ext--;
        if (end_req > mext_sz) {
            next = end_req / mext_sz;
        } else {
            next = 1;
        }
        ip->i_ext_count =
            ((next / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
        iext = ip->i_ext = kmem_zalloc(
            ip->i_ext_count * sizeof (struct icb_ext), KM_SLEEP);
        memory_allocated = 1;

        /* There will be at least EXT_PER_MALLOC icb_ext's allocated */

one_ad_i_ext:
        icb_offset = 0;
        count = end_req;

        /* Can we create a HOLE? */
        if ((PCEIL(ip->i_size) < PBASE(off)) &&
            ((PBASE(off) - PCEIL(ip->i_size)) >= PAGESIZE)) {

            if (ip->i_size != 0) {

                /*
                 * Allocate one block for the old data
                 * (it cannot be more than one page).
                 */
                count = PAGESIZE;
                if (error = ud_create_ext(ip, ip->i_ext_used,
                    ALLOC_SPACE | NEW_EXT, alloc_only,
                    icb_offset, &count)) {
                    goto embedded_error;
                }
                icb_offset = PAGESIZE;
            }

            /*
             * Allocate a hole from PCEIL(ip->i_size) to PBASE(off)
             */
            count = PBASE(off) - PCEIL(ip->i_size);
            (void) ud_create_ext(ip, ip->i_ext_used, NEW_EXT,
                alloc_only, icb_offset, &count);
            icb_offset = PBASE(off);

            /*
             * Allocate the rest of the space PBASE(off) to end_req
             */
            count = end_req - PBASE(off);
        } else {
            /*
             * If no hole can be created then allocate
             * space till the end of the request
             */
            count = end_req;
        }

        if (error = ud_create_ext(ip, ip->i_ext_used,
            ALLOC_SPACE | NEW_EXT,
            alloc_only, icb_offset, &count)) {
embedded_error:
            /*
             * Something went wrong, most probably the
             * file system is full.  We know the file came
             * in as an embedded file, so undo whatever was
             * done in this block of code.
             */
            if (dtype_changed) {
                ip->i_desc_type = ICB_FLAG_ONE_AD;
            }
            for (i = 0; i < ip->i_ext_used; i++) {
                iext = &ip->i_ext[i];
                if (iext->ib_flags != IB_UN_RE_AL) {
                    ud_free_space(ip->i_udf->udf_vfs,
                        iext->ib_prn, iext->ib_block,
                        (iext->ib_count + lbmask) >>
                        l2b);
                }
            }
            if (memory_allocated) {
                kmem_free(ip->i_ext,
                    ip->i_ext_count *
                    sizeof (struct icb_ext));
                ip->i_ext = NULL;
                ip->i_ext_count = ip->i_ext_used = 0;
            }
        }

        if (fbp != NULL) {
            fbrelse(fbp, S_WRITE);
        }

        return (error);
    } else {

        /*
         * Type 4 directories being created
         */
        if (ip->i_ext == NULL) {
            goto one_ad_no_i_ext;
        }

        /*
         * Read the entire set of icb's into memory
         */
        if (ud_read_icb_till_off(ip, ip->i_size) != 0) {
            error = EINVAL;
            goto out;
        }

        isize = CEIL(ip->i_size);

        if (end_req > isize) {

            /*
             * The new file size is greater
             * than the old size
             */
            if (ip->i_ext == NULL) {
                goto one_ad_no_i_ext;
            } else if (ip->i_ext_used == 0) {
                goto one_ad_i_ext;
            }

            error = ud_last_alloc_ext(ip, off, size, alloc_only);

            return (error);
        } else {

            /*
             * The file is growing, but the new size
             * will still be less than
             * iext->ib_offset + CEIL(iext->ib_count)
             * of the last extent
             */
            iext = &ip->i_ext[ip->i_ext_used - 1];

            if (end_req > (iext->ib_offset + iext->ib_count)) {

                iext->ib_count = end_req - iext->ib_offset;

                if (iext->ib_flags != IB_UN_RE_AL) {
                    error = 0;
                    goto out;
                }
            }
        }
    }

    /* By this point the end of the last extent is >= BASE(off) + size */

    ASSERT(ip->i_ext);

    /*
     * Figure out the icb_ext that has offset "off"
     */
    for (i = 0; i < ip->i_ext_used; i++) {
        iext = &ip->i_ext[i];
        if ((iext->ib_offset <= off) &&
            ((iext->ib_offset + iext->ib_count) > off)) {
            break;
        }
    }
    /*
     * iext will have offset "off"
     */
    do {
        iext = &ip->i_ext[i];

        if ((iext->ib_flags & IB_UN_RE_AL) == 0) {

            /*
             * Already allocated, do nothing
             */
            i++;
        } else {

            /*
             * We are in a hole.
             * Allocate the required space
             * while trying to create smaller holes
             */
            if ((PBASE(off) > PBASE(iext->ib_offset)) &&
                ((PBASE(off) - PBASE(iext->ib_offset)) >=
                PAGESIZE)) {

                /*
                 * Allocate space from the beginning of
                 * the old hole to the beginning of the
                 * new hole.  We want all holes created
                 * by us to be MMUPAGE aligned.
                 */
                if (PBASE(iext->ib_offset) !=
                    BASE(iext->ib_offset)) {
                    if ((error = ud_break_create_new_icb(
                        ip, i, BASE(iext->ib_offset) -
                        PBASE(iext->ib_offset))) != 0) {
                        return (error);
                    }
                    goto alloc_cur_ext;
                }

                /*
                 * Create the new hole
                 */
                if ((error = ud_break_create_new_icb(ip, i,
                    PBASE(off) - iext->ib_offset)) != 0) {
                    return (error);
                }
                iext = &ip->i_ext[i];
                i++;
                continue;
            }

            end_ext = iext->ib_offset + iext->ib_count;

            if ((PBASE(end_ext) > PCEIL(end_req)) &&
                ((PBASE(end_ext) - PCEIL(end_req)) >=
                PAGESIZE)) {
                /*
                 * We can create a hole
                 * from PCEIL(end_req) to BASE(end_ext)
                 */
                if ((error = ud_break_create_new_icb(ip, i,
                    PCEIL(end_req) - iext->ib_offset)) != 0) {
                    return (error);
                }
            }

alloc_cur_ext:
            /*
             * Allocate the current extent
             */

            /*
             * If the previous extent
             * is allocated then try to allocate
             * adjacent to the previous extent
             */
            prox = 0;
            if (i != 0) {
                pext = &ip->i_ext[i - 1];
                if (pext->ib_flags != IB_UN_RE_AL) {
                    prox = pext->ib_block +
                        (CEIL(pext->ib_count) >> l2b);
                }
            }

            iext = &ip->i_ext[i];
            blkcount = CEIL(iext->ib_count) >> l2b;

            if ((error = ud_alloc_space(ip->i_vfs,
                ip->i_icb_prn, prox, blkcount,
                &blkno, &sz, 1, 0)) != 0) {
                return (error);
            }
            ip->i_lbr += sz;
            if (sz == 0) {
                return (ENOSPC);
            }

            if (alloc_only == 0) {
                error = ud_zero_it(ip, blkno, sz);
            }

            acount = sz << l2b;
            if ((prox == blkno) &&
                ((pext->ib_count + acount) < mext_sz)) {

                /*
                 * We were able to allocate adjacent to
                 * the previous extent.  Increment the
                 * previous extent's count if the size
                 * of the extent is not greater than
                 * the max extent size.
                 */
                pext = &ip->i_ext[i - 1];
                pext->ib_count += acount;

                if (sz == blkcount) {
                    /*
                     * ... and get rid of the current
                     * extent since we have
                     * allocated all of its size
                     * and incremented the
                     * previous extent's count
                     */
                    ud_remove_ext_at_index(ip, i);
                } else {
                    /*
                     * Reduce the count of the
                     * current extent by the amount
                     * allocated in the last extent
                     */
                    ASSERT(acount < iext->ib_count);
                    iext->ib_count -= acount;
                    iext->ib_offset += acount;
                }
            } else {
                if (sz < blkcount) {
                    if ((error = ud_break_create_new_icb(
                        ip, i, sz << l2b)) != 0) {
                        return (error);
                    }
                }
                iext = &ip->i_ext[i];
                count -= CEIL(iext->ib_count);
                iext->ib_prn = ip->i_icb_prn;
                iext->ib_block = blkno;
                iext->ib_flags &= ~IB_UN_RE_AL;
                /*
                 * iext->ib_flags |= IB_UN_REC;
                 */
                i++;
                continue;
            }
        }
    } while ((iext->ib_offset + iext->ib_count) < end_req);

out:
    return (error);
}
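
/*
 * In both short and long allocation descriptors the low 30 bits of the
 * recorded length are the extent size in bytes and bits 30-31 are the
 * extent type (masked with IB_MASK); a zero length terminates the
 * descriptor list and the IB_CON type chains to a continuation
 * (allocation extent) block, which is why the scan below stops on
 * either of those.
 */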

/*
 * increase i_con/i_ext arrays and set new elements
 * using long or short allocation descriptors
 */
static void
ud_common_ad(struct ud_inode *ip, struct buf *bp)
{
    int32_t ndesc, count, lbmask;
    uint32_t length;
    struct alloc_ext_desc *aed;
    struct icb_ext *iext, *con;
    uoff_t offset;
    long_ad_t *lad;
    short_ad_t *sad;
    int islong;
    void *addr;

    addr = bp->b_un.b_addr + sizeof (struct alloc_ext_desc);
    aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
    length = SWAP_32(aed->aed_len_aed);
    if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
        islong = 1;
        lad = addr;
        ndesc = length / sizeof (*lad);
    } else if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
        islong = 0;
        sad = addr;
        ndesc = length / sizeof (*sad);
    } else
        return;

    /*
     * realloc i_ext array
     */
    count = (((ip->i_ext_used + ndesc) / EXT_PER_MALLOC) + 1) *
        EXT_PER_MALLOC;
    addr = kmem_zalloc(count * sizeof (struct icb_ext), KM_SLEEP);
    bcopy(ip->i_ext, addr, ip->i_ext_used * sizeof (struct icb_ext));
    kmem_free(ip->i_ext, ip->i_ext_count * sizeof (struct icb_ext));
    ip->i_ext = addr;
    ip->i_ext_count = count;

    /*
     * scan descriptors
     */
    lbmask = ip->i_udf->udf_lbmask;
    iext = &ip->i_ext[ip->i_ext_used - 1];
    offset = iext->ib_offset + iext->ib_count;
    iext++;
    while (ndesc--) {
        if (islong)
            length = SWAP_32(lad->lad_ext_len);
        else
            length = SWAP_32(sad->sad_ext_len);

        if ((length & 0x3FFFFFFF) == 0)
            break;
        else if (((length >> 30) & IB_MASK) == IB_CON) {
            if (ip->i_con_used == ip->i_con_count) {
                struct icb_ext *old;
                int32_t old_count;

                old = ip->i_con;
                old_count = ip->i_con_count *
                    sizeof (struct icb_ext);
                ip->i_con_count += EXT_PER_MALLOC;
                ip->i_con = kmem_zalloc(ip->i_con_count *
                    sizeof (struct icb_ext), KM_SLEEP);

                if (old) {
                    bcopy(old, ip->i_con, old_count);
                    kmem_free(old, old_count);
                }
            }
            con = &ip->i_con[ip->i_con_used];
            if (islong) {
                con->ib_prn = SWAP_16(lad->lad_ext_prn);
                con->ib_block = SWAP_32(lad->lad_ext_loc);
            } else {
                con->ib_prn = ip->i_icb_prn;
                con->ib_block = SWAP_32(sad->sad_ext_loc);
            }
            con->ib_count = length & 0x3FFFFFFF;
            con->ib_flags = (length >> 30) & IB_MASK;
            ip->i_con_used++;
            break;
        }

        if (islong) {
            iext->ib_prn = SWAP_16(lad->lad_ext_prn);
            iext->ib_block = SWAP_32(lad->lad_ext_loc);
            lad++;
        } else {
            iext->ib_prn = 0;
            iext->ib_block = SWAP_32(sad->sad_ext_loc);
            sad++;
        }
        iext->ib_count = length & 0x3FFFFFFF;
        iext->ib_offset = offset;
        iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
        iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
        offset += (iext->ib_count + lbmask) & (~lbmask);
        iext->ib_flags = (length >> 30) & IB_MASK;
        ip->i_ext_used++;
        iext++;
    }
}
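
/*
 * Read the continuation block described by i_con[i_con_read], verify
 * its allocation-extent-descriptor tag and fold the descriptors it
 * holds into the in-core extent list via ud_common_ad().
 */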
static int32_t
ud_read_next_cont(struct ud_inode *ip)
{
    uint32_t dummy, error = 0;
    struct alloc_ext_desc *aed;
    struct icb_ext *cont;
    struct buf *bp;
    daddr_t bno;

    cont = &ip->i_con[ip->i_con_read];
    ASSERT(cont->ib_count > 0);

    bno = ud_xlate_to_daddr(ip->i_udf, cont->ib_prn, cont->ib_block,
        1, &dummy);
    bp = ud_bread(ip->i_dev, bno << ip->i_udf->udf_l2d_shift,
        cont->ib_count);
    if (bp->b_flags & B_ERROR)
        error = bp->b_error;
    else {
        aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
        if (ud_verify_tag_and_desc(&aed->aed_tag, UD_ALLOC_EXT_DESC,
            cont->ib_block, 1, cont->ib_count))
            error = EINVAL;
    }

    if (error == 0)
        ud_common_ad(ip, bp);

    brelse(bp);
    return (error);
}
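
/*
 * Make sure the in-core extent list covers the file up to "offset",
 * reading continuation extents one at a time (under i_con_lock) until
 * the last extent reaches the requested offset.
 */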
int32_t
ud_read_icb_till_off(struct ud_inode *ip, uoff_t offset)
{
    int32_t error = 0;
    struct icb_ext *iext;

    ud_printf("ud_read_icb_till_off\n");

    if (ip->i_desc_type == ICB_FLAG_ONE_AD)
        return (0);
    else if ((ip->i_astrat != STRAT_TYPE4) &&
        (ip->i_astrat != STRAT_TYPE4096))
        return (EINVAL);
    else if (ip->i_ext_used == 0)
        return ((ip->i_size == 0) ? 0 : EINVAL);

    /*
     * supported allocation strategies are
     * STRAT_TYPE4 and STRAT_TYPE4096
     */

    mutex_enter(&ip->i_con_lock);
    iext = &ip->i_ext[ip->i_ext_used - 1];
    while ((iext->ib_offset + iext->ib_count) < offset) {
        if (ip->i_con_used == ip->i_con_read) {
            error = EINVAL;
            break;
        }
        if (error = ud_read_next_cont(ip))
            break;
        ip->i_con_read++;
        iext = &ip->i_ext[ip->i_ext_used - 1];
    }
    mutex_exit(&ip->i_con_lock);

    return (error);
}

/*
 * Assumes that off is beyond ip->i_size
 * and that there is at least one extent in use
 */
int32_t
ud_last_alloc_ext(struct ud_inode *ip, uint64_t off,
    uint32_t size, int32_t alloc_only)
{
    struct icb_ext *iext;
    struct udf_vfs *udf_vfsp;
    int32_t lbsize, lbmask;
    uint64_t end_req, end_count, icb_offset;
    uint64_t count;
    int32_t error = 0;

    udf_vfsp = ip->i_udf;
    lbsize = udf_vfsp->udf_lbsize;
    lbmask = udf_vfsp->udf_lbmask;

    end_req = BASE(off) + size;

    /*
     * If we are here it means the file
     * is growing beyond the end of the
     * current block.  So round up the
     * last extent.
     */
    iext = &ip->i_ext[ip->i_ext_used - 1];
    iext->ib_count = CEIL(iext->ib_count);

    /*
     * Figure out if we can create
     * a hole here
     */
    end_count = iext->ib_offset + iext->ib_count;

    if ((PCEIL(end_count) < PBASE(off)) &&
        ((PBASE(off) - PCEIL(end_count)) >= PAGESIZE)) {

        count = PCEIL(end_count) - CEIL(end_count);
        if (count >= lbsize) {

            /*
             * There is space between the beginning
             * of the hole to be created and the
             * end of the last offset.
             * Allocate blocks for it.
             */
            iext = &ip->i_ext[ip->i_ext_used - 1];
            icb_offset = iext->ib_offset + CEIL(iext->ib_count);

            if (iext->ib_flags == IB_UN_RE_AL) {

                /*
                 * Previous extent is an unallocated
                 * extent.  Create a new allocated
                 * extent.
                 */
                error = ud_create_ext(ip, ip->i_ext_used,
                    ALLOC_SPACE | NEW_EXT,
                    alloc_only, icb_offset, &count);

            } else {

                /*
                 * Last extent is allocated;
                 * try to allocate adjacent to the
                 * last extent
                 */
                error = ud_create_ext(ip, ip->i_ext_used - 1,
                    ALLOC_SPACE, alloc_only,
                    icb_offset, &count);
            }

            if (error != 0) {
                return (error);
            }
        }

        iext = &ip->i_ext[ip->i_ext_used - 1];
        end_count = iext->ib_offset + iext->ib_count;
        count = PBASE(off) - PCEIL(end_count);
        icb_offset = PCEIL(end_count);

        if (iext->ib_flags == IB_UN_RE_AL) {

            /*
             * The last extent is unallocated.
             * Just bump the extent count.
             */
            (void) ud_create_ext(ip, ip->i_ext_used - 1,
                0, alloc_only, icb_offset, &count);
        } else {

            /*
             * Last extent is allocated.
             * Round up the size of the extent to
             * lbsize and allocate a new unallocated extent.
             */
            iext->ib_count = CEIL(iext->ib_count);
            (void) ud_create_ext(ip, ip->i_ext_used,
                NEW_EXT, alloc_only, icb_offset, &count);
        }

        icb_offset = PBASE(off);
    } else {

        /*
         * We cannot create any hole in between
         * the last extent and the off, so
         * round up the count in the last extent
         */
        iext = &ip->i_ext[ip->i_ext_used - 1];
        iext->ib_count = CEIL(iext->ib_count);
    }

    iext = &ip->i_ext[ip->i_ext_used - 1];
    count = end_req - (iext->ib_offset + iext->ib_count);
    icb_offset = iext->ib_offset + CEIL(iext->ib_count);

    if (iext->ib_flags == IB_UN_RE_AL) {

        /*
         * Last extent was an unallocated extent;
         * create a new extent
         */
        error = ud_create_ext(ip, ip->i_ext_used,
            ALLOC_SPACE | NEW_EXT, alloc_only, icb_offset, &count);
    } else {

        /*
         * Last extent was an allocated extent;
         * try to allocate adjacent to the old blocks
         */
        error = ud_create_ext(ip, ip->i_ext_used - 1,
            ALLOC_SPACE, alloc_only, icb_offset, &count);
    }

    return (error);
}
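
/*
 * Worked example for ud_break_create_new_icb() below: splitting the
 * extent {ib_offset 0, ib_count 8192} at count 4096 yields
 * {ib_offset 0, ib_count 4096} at "index" and {ib_offset 4096,
 * ib_count 4096} at "index + 1"; for an allocated extent the second
 * half's ib_block is advanced by the split size in blocks.
 */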
/*
 * Break up the icb_ext at index
 * into two icb_ext's:
 * one at index with ib_count "count" and
 * the other at index + 1 with ib_count = old_ib_count - count
 */
int32_t
ud_break_create_new_icb(struct ud_inode *ip,
    int32_t index, uint32_t count)
{
    int32_t i, error;
    struct icb_ext *iext, *next;

    ud_printf("ud_break_create_new_icb\n");

    iext = &ip->i_ext[index];

    ASSERT(count < iext->ib_count);

    if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
        return (error);
    }

    for (i = ip->i_ext_used; i > index; i--) {
        ip->i_ext[i] = ip->i_ext[i - 1];
    }

    next = &ip->i_ext[index + 1];
    iext = &ip->i_ext[index];

    iext->ib_count = count;
    next->ib_count -= count;
    next->ib_offset = iext->ib_offset + iext->ib_count;
    if (iext->ib_flags != IB_UN_RE_AL) {
        next->ib_block = iext->ib_block +
            (iext->ib_count >> ip->i_udf->udf_l2b_shift);
    }
    ip->i_ext_used++;
    return (0);
}
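
/*
 * Drop the extent at "index" by sliding the rest of the
 * array down one slot; the vacated tail slot is zeroed.
 */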
void
ud_remove_ext_at_index(struct ud_inode *ip, int32_t index)
{
    int32_t i;

    ASSERT(index <= ip->i_ext_used);

    for (i = index; i < ip->i_ext_used; i++) {
        if ((i + 1) < ip->i_ext_count) {
            ip->i_ext[i] = ip->i_ext[i + 1];
        } else {
            bzero(&ip->i_ext[i], sizeof (struct icb_ext));
        }
    }
    ip->i_ext_used--;
}
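
/*
 * Make room for one more in-core extent.  The i_ext array grows by
 * EXT_PER_MALLOC entries when it is full, and once the extents no
 * longer fit in the descriptor space the on-disk inode provides
 * (i_cur_max_ext), a new continuation block is allocated on disk and
 * i_cur_max_ext is raised by the number of descriptors it can hold.
 */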
int32_t
ud_bump_ext_count(struct ud_inode *ip, int32_t sleep_flag)
{
    int32_t error = 0;
    struct icb_ext *iext;
    uint32_t old_count, elen;

    ASSERT(ip);
    ASSERT(sleep_flag == KM_SLEEP);

    ud_printf("ud_bump_ext_count\n");

    if (ip->i_ext_used >= ip->i_ext_count) {

        old_count = sizeof (struct icb_ext) * ip->i_ext_count;
        ip->i_ext_count += EXT_PER_MALLOC;
        iext = kmem_zalloc(sizeof (struct icb_ext) *
            ip->i_ext_count, sleep_flag);
        bcopy(ip->i_ext, iext, old_count);
        kmem_free(ip->i_ext, old_count);
        ip->i_ext = iext;
    }

    if (ip->i_ext_used >= ip->i_cur_max_ext) {
        int32_t prox;
        struct icb_ext *icon;
        uint32_t blkno, sz;
        int32_t lbmask, l2b;

        lbmask = ip->i_udf->udf_lbmask;
        l2b = ip->i_udf->udf_l2b_shift;

        if ((error = ud_read_icb_till_off(ip, ip->i_size)) != 0) {
            return (error);
        }

        /*
         * If there are any old cont extents
         * allocate the new one adjacent to the old one
         */
        if (ip->i_con_used != 0) {
            icon = &ip->i_con[ip->i_con_used - 1];
            prox = icon->ib_block + (CEIL(icon->ib_count) >> l2b);
        } else {
            prox = 0;
        }

        /*
         * Allocate space
         */
        if ((error = ud_alloc_space(ip->i_vfs, ip->i_icb_prn,
            prox, 1, &blkno, &sz, 0, 0)) != 0) {
            return (error);
        }
        if (sz == 0) {
            return (ENOSPC);
        }

        sz <<= l2b;

        if (ip->i_con_used == ip->i_con_count) {
            struct icb_ext *old;
            int32_t old_count;

            old = ip->i_con;
            old_count = ip->i_con_count *
                sizeof (struct icb_ext);
            ip->i_con_count += EXT_PER_MALLOC;
            ip->i_con = kmem_zalloc(ip->i_con_count *
                sizeof (struct icb_ext), KM_SLEEP);
            if (old != 0) {
                bcopy(old, ip->i_con, old_count);
                kmem_free(old, old_count);
            }
        }
        icon = &ip->i_con[ip->i_con_used++];
        icon->ib_flags = IB_CON;
        icon->ib_prn = ip->i_icb_prn;
        icon->ib_block = blkno;
        icon->ib_count = sz;
        icon->ib_offset = 0;
        icon->ib_marker1 = (uint32_t)0xAAAAAAAA;
        icon->ib_marker2 = (uint32_t)0xBBBBBBBB;

        /*
         * Bump the i_cur_max_ext according to
         * the space allocated
         */
        if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
            elen = sizeof (struct short_ad);
        } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
            elen = sizeof (struct long_ad);
        } else {
            return (ENOSPC);
        }
        sz = sz - (sizeof (struct alloc_ext_desc) + elen);
        ip->i_cur_max_ext += sz / elen;
    }
    return (error);
}
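
/*
 * Create or extend the extent at "index" so that it covers *count
 * bytes starting at "offset".  With ALLOC_SPACE set, disk blocks are
 * allocated (adjacent to the extent being grown when possible);
 * without it, an unallocated (hole) extent is built.  NEW_EXT starts
 * a fresh extent instead of growing an existing one.  When a single
 * extent cannot cover the request (the mext_sz cap, or a short
 * allocation), the function loops via "begin" with the next index,
 * decrementing *count as space is accounted for.
 */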
int32_t
ud_create_ext(struct ud_inode *ip, int32_t index, uint32_t flags,
    int32_t alloc_only, uint64_t offset, uint64_t *count)
{
    struct icb_ext *iext, *pext;
    struct udf_vfs *udf_vfsp;
    int32_t error = 0, blkcount, acount;
    uint32_t blkno, sz, prox, mext_sz;
    int32_t lbmask, l2b;

    if (*count == 0) {
        return (0);
    }

begin:
    udf_vfsp = ip->i_udf;
    lbmask = udf_vfsp->udf_lbmask;
    l2b = udf_vfsp->udf_l2b_shift;
    mext_sz = (1 << MEXT_BITS) - PAGESIZE;

    if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
        return (error);
    }

    iext = &ip->i_ext[index];
    if (flags & ALLOC_SPACE) {
        if ((flags & NEW_EXT) ||
            (ip->i_ext_count == 0)) {

            iext->ib_flags = 0;
            iext->ib_prn = ip->i_icb_prn;
            if (*count > mext_sz) {
                blkcount = mext_sz >> l2b;
            } else {
                blkcount = CEIL(*count) >> l2b;
            }
            if ((error = ud_alloc_space(ip->i_vfs,
                ip->i_icb_prn, 0, blkcount,
                &blkno, &sz, 1, 0)) != 0) {
                return (error);
            }
            if (sz == 0) {
                return (ENOSPC);
            }
            ip->i_lbr += sz;
            iext->ib_block = blkno;
            acount = sz << l2b;
            if ((sz << l2b) > *count) {
                iext->ib_count = *count;
                *count = 0;
            } else {
                iext->ib_count = sz << l2b;
                *count -= iext->ib_count;
            }
            iext->ib_offset = offset;
            if (ip->i_ext_used <= index)
                ip->i_ext_used++;
        } else {
            if ((iext->ib_count + *count) > mext_sz) {
                blkcount = (mext_sz - iext->ib_count) >> l2b;
            } else {
                blkcount = CEIL(*count) >> l2b;
            }
            if (blkcount == 0) {
                flags |= NEW_EXT;
                index++;
                goto begin;
            }
            prox = iext->ib_block + (CEIL(iext->ib_count) >> l2b);
            if ((error = ud_alloc_space(ip->i_vfs,
                ip->i_icb_prn, prox, blkcount,
                &blkno, &sz, 1, 0)) != 0) {
                return (error);
            }
            if (sz == 0) {
                return (ENOSPC);
            }
            acount = sz << l2b;
            if (acount > *count) {
                acount = *count;
                *count = 0;
            } else {
                *count -= acount;
            }
            ip->i_lbr += sz;
            if (prox == blkno) {
                iext->ib_count += acount;
            } else {
                if ((error = ud_bump_ext_count(ip, KM_SLEEP))
                    != 0) {
                    return (error);
                }
                pext = &ip->i_ext[index];
                iext = &ip->i_ext[index + 1];
                iext->ib_flags = 0;
                iext->ib_prn = ip->i_icb_prn;
                iext->ib_block = blkno;
                iext->ib_offset =
                    pext->ib_offset + pext->ib_count;
                iext->ib_count = acount;

                /*
                 * Increment the index, since we have used
                 * the extent at [index+1] above.
                 */
                index++;
                if (ip->i_ext_used <= index)
                    ip->i_ext_used++;
            }
        }
        if (alloc_only == 0) {
            error = ud_zero_it(ip, blkno, sz);
        }
        if (*count) {
            offset = iext->ib_offset + CEIL(iext->ib_count);
            flags |= NEW_EXT;
            index++;
            goto begin;
        }
    } else {
        if (flags & NEW_EXT) {
            iext->ib_flags = IB_UN_RE_AL;
            iext->ib_prn = 0;
            iext->ib_block = 0;
            if (*count > mext_sz) {
                iext->ib_count = mext_sz;
                *count -= iext->ib_count;
            } else {
                iext->ib_count = *count;
                *count = 0;
            }
            iext->ib_offset = offset;
            if (ip->i_ext_used <= index)
                ip->i_ext_used++;
        } else {
            ASSERT(iext->ib_flags == IB_UN_RE_AL);
            if ((iext->ib_count + *count) > mext_sz) {
                acount = mext_sz - iext->ib_count;
                iext->ib_count += acount;
                *count -= acount;
            } else {
                iext->ib_count += *count;
                *count = 0;
            }
        }
        if (*count != 0) {
            offset = iext->ib_offset + CEIL(iext->ib_count);
            flags |= NEW_EXT;
            index++;
            goto begin;
        }
    }
    iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
    iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
    return (error);
}

#undef CEIL
#undef BASE
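
/*
 * Zero the freshly allocated blocks [start_block, start_block +
 * block_count) with a private, synchronous one-shot buf so that stale
 * on-disk data never becomes visible through the file.
 */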
int32_t
ud_zero_it(struct ud_inode *ip, uint32_t start_block, uint32_t block_count)
{
    struct udf_vfs *udf_vfsp;
    uint32_t bno, dummy;
    int32_t error;
    struct buf *bp;

    /*
     * Do not use the bio routines,
     * since the buffer could sit
     * in the cache long enough for the space
     * to be freed and then
     * allocated again
     */
    udf_vfsp = ip->i_udf;
    bno = ud_xlate_to_daddr(udf_vfsp,
        ip->i_icb_prn, start_block, block_count, &dummy);

    dummy = block_count << udf_vfsp->udf_l2b_shift;
    bp = kmem_zalloc(biosize(), KM_SLEEP);
    sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
    sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);

    bp->b_flags = B_WRITE | B_BUSY;
    bp->b_edev = ip->i_dev;
    bp->b_dev = cmpdev(ip->i_dev);
    bp->b_blkno = bno << udf_vfsp->udf_l2d_shift;
    bp->b_bcount = dummy;
    bp->b_un.b_addr = kmem_zalloc(bp->b_bcount, KM_SLEEP);
    bp->b_file = ip->i_vnode;
    bp->b_offset = -1;

    (void) bdev_strategy(bp);
    if (error = biowait(bp)) {
        cmn_err(CE_WARN, "error in write\n");
    }

    kmem_free(bp->b_un.b_addr, dummy);
    sema_destroy(&bp->b_io);
    sema_destroy(&bp->b_sem);
    kmem_free((caddr_t)bp, biosize());

    return (error);
}