/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/cred.h>
#include <sys/errno.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/mman.h>
#include <sys/vmsystm.h>
#include <sys/vtrace.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/vm.h>

#include <sys/fs/swapnode.h>

#include <vm/seg.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <sys/fs_subr.h>

#include <vm/seg_kp.h>

/*
 * Define the routines within this file.
 */
static int swap_getpage(struct vnode *vp, offset_t off, size_t len,
    uint_t *protp, struct page **plarr, size_t plsz, struct seg *seg,
    caddr_t addr, enum seg_rw rw, struct cred *cr, caller_context_t *ct);
static int swap_putpage(struct vnode *vp, offset_t off, size_t len,
    int flags, struct cred *cr, caller_context_t *ct);
static void swap_inactive(struct vnode *vp, struct cred *cr,
    caller_context_t *ct);
static void swap_dispose(vnode_t *vp, page_t *pp, int fl, int dn,
    cred_t *cr, caller_context_t *ct);

static int swap_getapage(struct vnode *vp, uoff_t off, size_t len,
    uint_t *protp, page_t **plarr, size_t plsz,
    struct seg *seg, caddr_t addr, enum seg_rw rw, struct cred *cr);

int swap_getconpage(struct vnode *vp, uoff_t off, size_t len,
    uint_t *protp, page_t **plarr, size_t plsz, page_t *conpp,
    uint_t *pszc, spgcnt_t *nreloc, struct seg *seg, caddr_t addr,
    enum seg_rw rw, struct cred *cr);

static int swap_putapage(struct vnode *vp, page_t *pp, uoff_t *off,
    size_t *lenp, int flags, struct cred *cr);

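/*
 * Paging is the only real work swapfs does.  Of the operations named
 * in the table below, only inactive/getpage/putpage/dispose have
 * swapfs-specific implementations; the rest are explicitly rejected
 * with fs_nosys.
 */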
const struct vnodeops swap_vnodeops = {
	.vnop_name = "swapfs",
	.vop_inactive = swap_inactive,
	.vop_getpage = swap_getpage,
	.vop_putpage = swap_putpage,
	.vop_dispose = swap_dispose,
	.vop_setfl = fs_nosys,
	.vop_poll = (void *) fs_nosys,
	.vop_pathconf = fs_nosys,
	.vop_getsecattr = fs_nosys,
	.vop_shrlock = fs_nosys,
};

/* ARGSUSED */
static void
swap_inactive(
	struct vnode *vp,
	struct cred *cr,
	caller_context_t *ct)
{
	SWAPFS_PRINT(SWAP_VOPS, "swap_inactive: vp %x\n", vp, 0, 0, 0, 0);
}

/*
 * Return all the pages from [off..off+len] in given file
 */
/*ARGSUSED*/
static int
swap_getpage(
	struct vnode *vp,
	offset_t off,
	size_t len,
	uint_t *protp,
	page_t *pl[],
	size_t plsz,
	struct seg *seg,
	caddr_t addr,
	enum seg_rw rw,
	struct cred *cr,
	caller_context_t *ct)
{
	SWAPFS_PRINT(SWAP_VOPS, "swap_getpage: vp %p, off %llx, len %lx\n",
	    (void *)vp, off, len, 0, 0);

	TRACE_3(TR_FAC_SWAPFS, TR_SWAPFS_GETPAGE,
	    "swapfs getpage:vp %p off %llx len %ld",
	    (void *)vp, off, len);

	return (pvn_getpages(swap_getapage, vp, (uoff_t)off, len, protp,
	    pl, plsz, seg, addr, rw, cr));
}

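/*
 * Note that pvn_getpages() simply walks [off, off + len) in PAGESIZE
 * steps and calls swap_getapage() below once per constituent page.
 */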
/*
 * Called from pvn_getpages to get a particular page.
 */
/*ARGSUSED*/
static int
swap_getapage(
	struct vnode *vp,
	uoff_t off,
	size_t len,
	uint_t *protp,
	page_t *pl[],
	size_t plsz,
	struct seg *seg,
	caddr_t addr,
	enum seg_rw rw,
	struct cred *cr)
{
	struct page *pp, *rpp;
	int flags;
	int err = 0;
	struct vnode *pvp = NULL;
	uoff_t poff;
	int flag_noreloc;
	se_t lock;
	extern int kcage_on;
	int upgrade = 0;

	SWAPFS_PRINT(SWAP_VOPS, "swap_getapage: vp %p, off %llx, len %lx\n",
	    vp, off, len, 0, 0);

	/*
	 * Until there is a call-back mechanism to cause SEGKP
	 * pages to be unlocked, make them non-relocatable.
	 */
	if (SEG_IS_SEGKP(seg))
		flag_noreloc = PG_NORELOC;
	else
		flag_noreloc = 0;

	if (protp != NULL)
		*protp = PROT_ALL;

	lock = (rw == S_CREATE ? SE_EXCL : SE_SHARED);
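	/*
	 * S_CREATE callers are about to initialize the page themselves,
	 * so take the page lock exclusively from the start; all other
	 * faults can tolerate a shared lock.
	 */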

again:
	if (pp = page_lookup(&vp->v_object, off, lock)) {
		/*
		 * In very rare instances, a segkp page may have been
		 * relocated outside of the kernel by the kernel cage
		 * due to the window between page_unlock() and
		 * fop_putpage() in segkp_unlock().  Due to the
		 * rareness of these occurrences, the solution is to
		 * relocate the page to a P_NORELOC page.
		 */
		if (flag_noreloc != 0) {
			if (!PP_ISNORELOC(pp) && kcage_on) {
				if (lock != SE_EXCL) {
					upgrade = 1;
					if (!page_tryupgrade(pp)) {
						page_unlock(pp);
						lock = SE_EXCL;
						goto again;
					}
				}

				if (page_relocate_cage(&pp, &rpp) != 0)
					panic("swap_getapage: "
					    "page_relocate_cage failed");

				pp = rpp;
			}
		}

		if (pl) {
			if (upgrade)
				page_downgrade(pp);

			pl[0] = pp;
			pl[1] = NULL;
		} else {
			page_unlock(pp);
		}
	} else {
		pp = page_create_va(&vp->v_object, off, PAGESIZE,
		    PG_WAIT | PG_EXCL | flag_noreloc,
		    seg, addr);

		/*
		 * Someone raced in and created the page after we did the
		 * lookup but before we did the create, so go back and
		 * try to look it up again.
		 */
		if (pp == NULL)
			goto again;
		if (rw != S_CREATE) {
			err = swap_getphysname(vp, off, &pvp, &poff);
			if (pvp) {
				struct anon *ap;
				kmutex_t *ahm;

				flags = (pl == NULL ? B_ASYNC|B_READ : B_READ);
				err = fop_pageio(pvp, pp, poff,
				    PAGESIZE, flags, cr, NULL);

				if (!err) {
					ahm = AH_MUTEX(vp, off);
					mutex_enter(ahm);

					ap = swap_anon(vp, off);
					if (ap == NULL) {
						panic("swap_getapage:"
						    " null anon");
					}

					if (ap->an_pvp == pvp &&
					    ap->an_poff == poff) {
						swap_phys_free(pvp, poff,
						    PAGESIZE);
						ap->an_pvp = NULL;
						ap->an_poff = 0;
						hat_setmod(pp);
					}

					mutex_exit(ahm);
				}
			} else {
				if (!err)
					pagezero(pp, 0, PAGESIZE);

				/*
				 * If it's a fault ahead, release page_io_lock
				 * and SE_EXCL we grabbed in page_create_va
				 *
				 * If we are here, we haven't called fop_pageio
				 * and thus calling pvn_read_done(pp, B_READ)
				 * below may wrongly suggest that we tried i/o.
				 * Besides, in case of async, pvn_read_done()
				 * should not be called by *getpage()
				 */
				if (pl == NULL) {
					/*
					 * swap_getphysname can return error
					 * only when we are getting called from
					 * swapslot_free which passes non-NULL
					 * pl to fop_getpage.
					 */
					ASSERT(err == 0);
					page_io_unlock(pp);
					page_unlock(pp);
				}
			}
		}
	}

	ASSERT(pp != NULL);

	if (err && pl)
		pvn_read_done(pp, B_ERROR);

	if (!err && pl)
		pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);

	TRACE_3(TR_FAC_SWAPFS, TR_SWAPFS_GETAPAGE,
	    "swapfs getapage:pp %p vp %p off %llx", pp, vp, off);
	return (err);
}

/*
 * Called from large page anon routines only! This is an ugly hack where
 * the anon layer directly calls into swapfs with a preallocated large page.
 * Another method would have been to change the VOP and add an extra arg for
 * the preallocated large page. This all could be cleaned up later when we
 * solve the anonymous naming problem and no longer need to loop across
 * the VOP in PAGESIZE increments to fill in or initialize a large page as
 * is done today. I think the latter is better since it avoids a change to
 * the VOP interface that could later be avoided.
 */
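/*
 * For illustration only (not code from this file): the PAGESIZE loop
 * referred to above lives in the anon layer and looks roughly like
 *
 *	for (i = 0; i < pgcnt; i++, off += PAGESIZE, addr += PAGESIZE)
 *		(void) fop_getpage(vp, off, PAGESIZE, ..., S_CREATE, ...);
 *
 * swap_getconpage() exists so that a preallocated large page can be
 * threaded through such a loop one constituent page at a time.
 */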
int
swap_getconpage(
	struct vnode *vp,
	uoff_t off,
	size_t len,
	uint_t *protp,
	page_t *pl[],
	size_t plsz,
	page_t *conpp,
	uint_t *pszc,
	spgcnt_t *nreloc,
	struct seg *seg,
	caddr_t addr,
	enum seg_rw rw,
	struct cred *cr)
{
	struct page *pp;
	int err = 0;
	struct vnode *pvp = NULL;
	uoff_t poff;

	ASSERT(len == PAGESIZE);
	ASSERT(pl != NULL);
	ASSERT(plsz == PAGESIZE);
	ASSERT(protp == NULL);
	ASSERT(nreloc != NULL);
	ASSERT(!SEG_IS_SEGKP(seg)); /* XXX for now not supported */
	SWAPFS_PRINT(SWAP_VOPS, "swap_getconpage: vp %p, off %llx, len %lx\n",
	    vp, off, len, 0, 0);

	/*
	 * If we are not using a preallocated page then we know one already
	 * exists. So just let the old code handle it.
	 */
	if (conpp == NULL) {
		err = swap_getapage(vp, (uoff_t)off, len, protp, pl, plsz,
		    seg, addr, rw, cr);
		return (err);
	}
	ASSERT(conpp->p_szc != 0);
	ASSERT(PAGE_EXCL(conpp));

	ASSERT(conpp->p_next == conpp);
	ASSERT(conpp->p_prev == conpp);
	ASSERT(!PP_ISAGED(conpp));
	ASSERT(!PP_ISFREE(conpp));
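
	/*
	 * To summarize the asserts above: conpp must arrive as a
	 * solitary, exclusively locked page (circularly linked to
	 * itself) that is not sitting on the freelist.
	 */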

	*nreloc = 0;
	pp = page_lookup_create(&vp->v_object, off, SE_SHARED, conpp, nreloc,
	    0);

	/*
	 * If existing page is found we may need to relocate.
	 */
	if (pp != conpp) {
		ASSERT(rw != S_CREATE);
		ASSERT(pszc != NULL);
		ASSERT(PAGE_SHARED(pp));
		if (pp->p_szc < conpp->p_szc) {
			*pszc = pp->p_szc;
			page_unlock(pp);
			err = -1;
		} else if (pp->p_szc > conpp->p_szc &&
		    seg->s_szc > conpp->p_szc) {
			*pszc = MIN(pp->p_szc, seg->s_szc);
			page_unlock(pp);
			err = -2;
		} else {
			pl[0] = pp;
			pl[1] = NULL;
			if (page_pptonum(pp) &
			    (page_get_pagecnt(conpp->p_szc) - 1))
				cmn_err(CE_PANIC, "swap_getconpage: no root");
		}
		return (err);
	}

	ASSERT(PAGE_EXCL(pp));

	if (*nreloc != 0) {
		ASSERT(rw != S_CREATE);
		pl[0] = pp;
		pl[1] = NULL;
		return (0);
	}

	*nreloc = 1;

	/*
	 * If necessary do the page io.
	 */
	if (rw != S_CREATE) {
		/*
		 * Since we are only called now on behalf of an
		 * address space operation it's impossible for
		 * us to fail unlike swap_getapage() which
		 * also gets called from swapslot_free().
		 */
		if (swap_getphysname(vp, off, &pvp, &poff)) {
			cmn_err(CE_PANIC,
			    "swap_getconpage: swap_getphysname failed!");
		}

		if (pvp != NULL) {
			err = fop_pageio(pvp, pp, poff, PAGESIZE, B_READ,
			    cr, NULL);
			if (err == 0) {
				struct anon *ap;
				kmutex_t *ahm;

				ahm = AH_MUTEX(vp, off);
				mutex_enter(ahm);
				ap = swap_anon(vp, off);
				if (ap == NULL)
					panic("swap_getconpage: null anon");
				if (ap->an_pvp != pvp || ap->an_poff != poff)
					panic("swap_getconpage: bad anon");

				swap_phys_free(pvp, poff, PAGESIZE);
				ap->an_pvp = NULL;
				ap->an_poff = 0;
				hat_setmod(pp);
				mutex_exit(ahm);
			}
		} else {
			pagezero(pp, 0, PAGESIZE);
		}
	}

	/*
	 * Normally we would let pvn_read_done() destroy
	 * the page on IO error. But since this is a preallocated
	 * page we'll let the anon layer handle it.
	 */
	page_io_unlock(pp);
	if (err != 0)
		page_hashout(pp, false);
	ASSERT(pp->p_next == pp);
	ASSERT(pp->p_prev == pp);

	TRACE_3(TR_FAC_SWAPFS, TR_SWAPFS_GETAPAGE,
	    "swapfs getconpage:pp %p vp %p off %llx", pp, vp, off);

	pl[0] = pp;
	pl[1] = NULL;
	return (err);
}

/* Async putpage klustering stuff */
int sw_pending_size;
extern int klustsize;
extern struct async_reqs *sw_getreq();
extern void sw_putreq(struct async_reqs *);
extern void sw_putbackreq(struct async_reqs *);
extern struct async_reqs *sw_getfree();
extern void sw_putfree(struct async_reqs *);
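/*
 * The sw_* routines above implement the queue of pending async putpage
 * requests that swap_putpage() and swap_putapage() feed; they are
 * defined elsewhere in the kernel's swap code.
 */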

static size_t swap_putpagecnt, swap_pagespushed;
static size_t swap_otherfail, swap_otherpages;
static size_t swap_klustfail, swap_klustpages;
static size_t swap_getiofail, swap_getiopages;

/*
 * Flags are composed of {B_INVAL, B_DIRTY, B_FREE, B_DONTNEED}.
 * If len == 0, do from off to EOF.
 */
static int swap_nopage = 0;	/* Don't do swap_putpage's if set */
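/*
 * swap_nopage is a debugging kill switch: nothing in this file ever
 * sets it, but when set swap_putpage() silently succeeds without
 * pushing anything.
 */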

/* ARGSUSED */
static int
swap_putpage(
	struct vnode *vp,
	offset_t off,
	size_t len,
	int flags,
	struct cred *cr,
	caller_context_t *ct)
{
	page_t *pp;
	uoff_t io_off;
	size_t io_len = 0;
	int err = 0;
	int nowait;
	struct async_reqs *arg;

	if (swap_nopage)
		return (0);

	ASSERT(vp->v_count != 0);

	nowait = flags & B_PAGE_NOWAIT;

	/*
	 * Clear force flag so that p_lckcnt pages are not invalidated.
	 */
	flags &= ~(B_FORCE | B_PAGE_NOWAIT);

	SWAPFS_PRINT(SWAP_VOPS,
	    "swap_putpage: vp %p, off %llx len %lx, flags %x\n",
	    (void *)vp, off, len, flags, 0);
	TRACE_3(TR_FAC_SWAPFS, TR_SWAPFS_PUTPAGE,
	    "swapfs putpage:vp %p off %llx len %ld", (void *)vp, off, len);

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);

	if (!vn_has_cached_data(vp))
		return (0);

	if (len == 0) {
		if (curproc == proc_pageout)
			cmn_err(CE_PANIC, "swapfs: pageout can't block");

		/* Search the entire vp list for pages >= off. */
		err = pvn_vplist_dirty(vp, (uoff_t)off, swap_putapage,
		    flags, cr);
	} else {
		uoff_t eoff;

		/*
		 * Loop over all offsets in the range [off...off + len]
		 * looking for pages to deal with.
		 */
		eoff = off + len;
		for (io_off = (uoff_t)off; io_off < eoff;
		    io_off += io_len) {
			/*
			 * While async request slots are available, queue
			 * the page; once we run out of slots, put the
			 * page now instead of queuing.
			 */
			if (flags == (B_ASYNC | B_FREE) &&
			    sw_pending_size < klustsize &&
			    (arg = sw_getfree())) {
				/*
				 * If we are clustering, we should allow
				 * pageout to feed us more pages because # of
				 * pushes is limited by # of I/Os, and one
				 * cluster is considered to be one I/O.
				 */
				if (pushes)
					pushes--;

				arg->a_vp = vp;
				arg->a_off = io_off;
				arg->a_len = PAGESIZE;
				arg->a_flags = B_ASYNC | B_FREE;
				arg->a_cred = kcred;
				sw_putreq(arg);
				io_len = PAGESIZE;
				continue;
			}

			/*
			 * If we are not invalidating pages, use the
			 * routine page_lookup_nowait() to prevent
			 * reclaiming them from the free list.
			 */
			if (!nowait && ((flags & B_INVAL) ||
			    (flags & (B_ASYNC | B_FREE)) == B_FREE))
				pp = page_lookup(&vp->v_object, io_off,
				    SE_EXCL);
			else
				pp = page_lookup_nowait(&vp->v_object,
				    io_off,
				    (flags & (B_FREE | B_INVAL)) ?
				    SE_EXCL : SE_SHARED);

			if (pp == NULL || pvn_getdirty(pp, flags) == 0)
				io_len = PAGESIZE;
			else {
				err = swap_putapage(vp, pp, &io_off, &io_len,
				    flags, cr);
				if (err != 0)
					break;
			}
		}
	}
	/* If invalidating, verify all pages on vnode list are gone. */
	if (err == 0 && off == 0 && len == 0 &&
	    (flags & B_INVAL) && vn_has_cached_data(vp)) {
		cmn_err(CE_WARN,
		    "swap_putpage: B_INVAL, pages not gone");
	}
	return (err);
}

/*
 * Write out a single page.
 * For swapfs this means choose a physical swap slot and write the page
 * out using fop_pageio.
 * In the (B_ASYNC | B_FREE) case we try to find a bunch of other dirty
 * swapfs pages, a bunch of contiguous swap slots and then write them
 * all out in one clustered i/o.
 */
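/*
 * Klustering in outline: starting from the page we were handed, keep
 * consuming queued async requests for as long as each newly assigned
 * physical slot abuts the run built so far,
 *
 *	[klstart, klstart + klsz) on klvp
 *
 * A slot at klstart + klsz grows the run forward and one at
 * klstart - PAGESIZE grows it backward; anything else is pushed back
 * on the queue and the run is written with a single fop_pageio().
 */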
/*ARGSUSED*/
static int
swap_putapage(
	struct vnode *vp,
	page_t *pp,
	uoff_t *offp,
	size_t *lenp,
	int flags,
	struct cred *cr)
{
	int err;
	struct vnode *pvp;
	uoff_t poff, off;
	uoff_t doff;
	size_t dlen;
	size_t klsz = 0;
	uoff_t klstart = 0;
	struct vnode *klvp = NULL;
	page_t *pplist;
	se_t se;
	struct async_reqs *arg;
	size_t swap_klustsize;

	/*
	 * This check is for callers who reach swap_putpage with len == 0:
	 * swap_putpage then calls swap_putapage page-by-page via
	 * pvn_vplist_dirty, and such requests must be queued the same way
	 * as the B_ASYNC|B_FREE requests queued directly by swap_putpage.
	 */
	if (flags == (B_ASYNC | B_FREE) &&
	    sw_pending_size < klustsize && (arg = sw_getfree())) {

		hat_setmod(pp);
		page_io_unlock(pp);
		page_unlock(pp);

		arg->a_vp = vp;
		arg->a_off = pp->p_offset;
		arg->a_len = PAGESIZE;
		arg->a_flags = B_ASYNC | B_FREE;
		arg->a_cred = kcred;
		sw_putreq(arg);

		return (0);
	}

	SWAPFS_PRINT(SWAP_PUTP,
	    "swap_putapage: pp %p, vp %p, off %llx, flags %x\n",
	    pp, vp, pp->p_offset, flags, 0);

	ASSERT(PAGE_LOCKED(pp));

	off = pp->p_offset;

	doff = off;
	dlen = PAGESIZE;

	if (err = swap_newphysname(vp, off, &doff, &dlen, &pvp, &poff)) {
		err = (flags == (B_ASYNC | B_FREE) ? ENOMEM : 0);
		hat_setmod(pp);
		page_io_unlock(pp);
		page_unlock(pp);
		goto out;
	}

	klvp = pvp;
	klstart = poff;
	pplist = pp;

	/*
	 * If this is ASYNC | FREE and we've accumulated a bunch of such
	 * pending requests, kluster.
	 */
	if (flags == (B_ASYNC | B_FREE))
		swap_klustsize = klustsize;
	else
		swap_klustsize = PAGESIZE;
	se = (flags & B_FREE ? SE_EXCL : SE_SHARED);
	klsz = PAGESIZE;
	while (klsz < swap_klustsize) {
		if ((arg = sw_getreq()) == NULL) {
			swap_getiofail++;
			swap_getiopages += btop(klsz);
			break;
		}
		ASSERT(vn_matchops(arg->a_vp, &swap_vnodeops));
		vp = arg->a_vp;
		off = arg->a_off;

		if ((pp = page_lookup_nowait(&vp->v_object, off, se)) == NULL) {
			swap_otherfail++;
			swap_otherpages += btop(klsz);
			sw_putfree(arg);
			break;
		}
		if (pvn_getdirty(pp, flags | B_DELWRI) == 0) {
			sw_putfree(arg);
			continue;
		}
		/* Get new physical backing store for the page */
		doff = off;
		dlen = PAGESIZE;
		if (err = swap_newphysname(vp, off, &doff, &dlen,
		    &pvp, &poff)) {
			swap_otherfail++;
			swap_otherpages += btop(klsz);
			hat_setmod(pp);
			page_io_unlock(pp);
			page_unlock(pp);
			sw_putbackreq(arg);
			break;
		}
		/* Try to cluster new physical name with previous ones */
		if (klvp == pvp && poff == klstart + klsz) {
			klsz += PAGESIZE;
			page_add(&pplist, pp);
			pplist = pplist->p_next;
			sw_putfree(arg);
		} else if (klvp == pvp && poff == klstart - PAGESIZE) {
			klsz += PAGESIZE;
			klstart -= PAGESIZE;
			page_add(&pplist, pp);
			sw_putfree(arg);
		} else {
			swap_klustfail++;
			swap_klustpages += btop(klsz);
			hat_setmod(pp);
			page_io_unlock(pp);
			page_unlock(pp);
			sw_putbackreq(arg);
			break;
		}
	}

	err = fop_pageio(klvp, pplist, klstart, klsz,
	    B_WRITE | flags, cr, NULL);

	if ((flags & B_ASYNC) == 0)
		pvn_write_done(pp, ((err) ? B_ERROR : 0) | B_WRITE | flags);

	/* Statistics */
	if (!err) {
		swap_putpagecnt++;
		swap_pagespushed += btop(klsz);
	}
out:
	TRACE_4(TR_FAC_SWAPFS, TR_SWAPFS_PUTAPAGE,
	    "swapfs putapage:vp %p klvp %p, klstart %lx, klsz %lx",
	    vp, klvp, klstart, klsz);
	if (err && err != ENOMEM)
		cmn_err(CE_WARN, "swapfs_putapage: err %d\n", err);
	if (lenp)
		*lenp = PAGESIZE;
	return (err);
}

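/*
 * Dispose of a page: if the page still has physical swap backing, let
 * the physical swap vnode's own dispose routine deal with it;
 * otherwise fall back to the generic fs_dispose().
 */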
static void
swap_dispose(
	vnode_t *vp,
	page_t *pp,
	int fl,
	int dn,
	cred_t *cr,
	caller_context_t *ct)
{
	int err;
	uoff_t off = pp->p_offset;
	vnode_t *pvp;
	uoff_t poff;

	ASSERT(PAGE_EXCL(pp));

	/*
	 * The caller will free/invalidate a large page in one shot instead
	 * of one small page at a time.
	 */
	if (pp->p_szc != 0) {
		page_unlock(pp);
		return;
	}

	err = swap_getphysname(vp, off, &pvp, &poff);
	if (!err && pvp != NULL)
		fop_dispose(pvp, pp, fl, dn, cr, ct);
	else
		fs_dispose(vp, pp, fl, dn, cr, ct);
}