/*	$NetBSD: uvm_bio.c,v 1.67 2009/08/04 23:31:57 pooka Exp $	*/

/*
 * Copyright (c) 1998 Chuck Silvers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * uvm_bio.c: buffered i/o object mapping cache
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.67 2009/08/04 23:31:57 pooka Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */
static int	ubc_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
			  int, int, vm_prot_t, int);
static struct ubc_map *ubc_find_mapping(struct uvm_object *, voff_t);

/*
 * local data structures
 */
#define UBC_HASH(uobj, offset) \
	(((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
				ubc_object.hashmask)

#define UBC_QUEUE(offset) \
	(&ubc_object.inactive[(((u_long)(offset)) >> ubc_winshift) & \
			     (UBC_NQUEUES - 1)])

#define UBC_UMAP_ADDR(u) \
	(vaddr_t)(ubc_object.kva + (((u) - ubc_object.umap) << ubc_winshift))
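
/*
 * Worked example (illustrative values, not from this file): with
 * ubc_winshift = 16 (64kB windows) and PAGE_SHIFT = 12, file offset
 * 0x12345 belongs to the window at umap offset 0x12345 & ~0xffff =
 * 0x10000, at slot offset 0x2345 within that window.  UBC_HASH()
 * mixes the object pointer and offset into an index bounded by
 * hashmask, and UBC_UMAP_ADDR() converts a ubc_map's array index
 * back into the kernel virtual address of its window.
 */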

#define UMAP_PAGES_LOCKED	0x0001
#define UMAP_MAPPING_CACHED	0x0002

struct ubc_map
{
	struct uvm_object *	uobj;		/* mapped object */
	voff_t			offset;		/* offset into uobj */
	voff_t			writeoff;	/* write offset */
	vsize_t			writelen;	/* write len */
	int			refcount;	/* refcount on mapping */
	int			flags;		/* extra state */
	int			advice;		/* access pattern hint */

	LIST_ENTRY(ubc_map)	hash;		/* hash table */
	TAILQ_ENTRY(ubc_map)	inactive;	/* inactive queue */
};

static struct ubc_object
{
	struct uvm_object uobj;		/* glue for uvm_map() */
	char *kva;			/* where ubc_object is mapped */
	struct ubc_map *umap;		/* array of ubc_map's */

	LIST_HEAD(, ubc_map) *hash;	/* hashtable for cached ubc_map's */
	u_long hashmask;		/* mask for hashtable */

	TAILQ_HEAD(ubc_inactive_head, ubc_map) *inactive;
					/* inactive queues for ubc_map's */

} ubc_object;
const struct uvm_pagerops ubc_pager = {
	.pgo_fault = ubc_fault,
	/* ... rest are NULL */
};

int ubc_nwins = UBC_NWINS;
int ubc_winshift = UBC_WINSHIFT;
int ubc_winsize;
#if defined(PMAP_PREFER)
int ubc_nqueues;
#define UBC_NQUEUES ubc_nqueues
#else
#define UBC_NQUEUES 1
#endif

#if defined(UBC_STATS)

#define	UBC_EVCNT_DEFINE(name) \
struct evcnt ubc_evcnt_##name = \
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", #name); \
EVCNT_ATTACH_STATIC(ubc_evcnt_##name);
#define	UBC_EVCNT_INCR(name) ubc_evcnt_##name.ev_count++

#else /* defined(UBC_STATS) */

#define	UBC_EVCNT_DEFINE(name)	/* nothing */
#define	UBC_EVCNT_INCR(name)	/* nothing */

#endif /* defined(UBC_STATS) */
UBC_EVCNT_DEFINE(wincachehit)
UBC_EVCNT_DEFINE(wincachemiss)
UBC_EVCNT_DEFINE(faultbusy)
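
/*
 * For illustration (assuming the UBC_STATS branch above):
 * UBC_EVCNT_DEFINE(wincachehit) expands to roughly
 *
 *	struct evcnt ubc_evcnt_wincachehit =
 *	    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "ubc", "wincachehit");
 *	EVCNT_ATTACH_STATIC(ubc_evcnt_wincachehit);
 *
 * so each counter shows up in the "ubc" group of vmstat -e output;
 * without UBC_STATS both macros compile away to nothing.
 */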

/*
 * ubc_init
 *
 * init pager private data structures.
 */

void
ubc_init(void)
{
	struct ubc_map *umap;
	vaddr_t va;
	int i;

	/*
	 * Make sure ubc_winshift is sane.
	 */

	if (ubc_winshift < PAGE_SHIFT)
		ubc_winshift = PAGE_SHIFT;

	/*
	 * init ubc_object.
	 * alloc and init ubc_map's.
	 * init inactive queues.
	 * alloc and init hashtable.
	 * map in ubc_object.
	 */

	UVM_OBJ_INIT(&ubc_object.uobj, &ubc_pager, UVM_OBJ_KERN);

	ubc_object.umap = kmem_zalloc(ubc_nwins * sizeof(struct ubc_map),
	    KM_SLEEP);
	if (ubc_object.umap == NULL)
		panic("ubc_init: failed to allocate ubc_map");

	if (ubc_winshift < PAGE_SHIFT) {
		ubc_winshift = PAGE_SHIFT;
	}
	va = (vaddr_t)1 << ubc_winshift;
#if defined(PMAP_PREFER)
	PMAP_PREFER(0, &va, 0, 0);	/* kernel is never topdown */
	ubc_nqueues = va >> ubc_winshift;
	if (ubc_nqueues == 0) {
		ubc_nqueues = 1;
	}
#endif
	ubc_winsize = 1 << ubc_winshift;
	ubc_object.inactive = kmem_alloc(UBC_NQUEUES *
	    sizeof(struct ubc_inactive_head), KM_SLEEP);
	if (ubc_object.inactive == NULL)
		panic("ubc_init: failed to allocate inactive queue heads");
	for (i = 0; i < UBC_NQUEUES; i++) {
		TAILQ_INIT(&ubc_object.inactive[i]);
	}
	for (i = 0; i < ubc_nwins; i++) {
		umap = &ubc_object.umap[i];
		TAILQ_INSERT_TAIL(&ubc_object.inactive[i & (UBC_NQUEUES - 1)],
				  umap, inactive);
	}

	ubc_object.hash = hashinit(ubc_nwins, HASH_LIST, true,
	    &ubc_object.hashmask);
	for (i = 0; i <= ubc_object.hashmask; i++) {
		LIST_INIT(&ubc_object.hash[i]);
	}

	if (uvm_map(kernel_map, (vaddr_t *)&ubc_object.kva,
		    ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)) != 0) {
		panic("ubc_init: failed to map ubc_object");
	}
	UVMHIST_INIT(ubchist, 300);
}

/*
 * ubc_fault: fault routine for ubc mapping
 */

static int
ubc_fault(struct uvm_faultinfo *ufi, vaddr_t ign1, struct vm_page **ign2,
    int ign3, int ign4, vm_prot_t access_type, int flags)
{
	struct uvm_object *uobj;
	struct ubc_map *umap;
	vaddr_t va, eva, ubc_offset, slot_offset;
	int i, error, npages;
	struct vm_page *pgs[ubc_winsize >> PAGE_SHIFT], *pg;
	vm_prot_t prot;
	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);

	/*
	 * no need to try with PGO_LOCKED...
	 * we don't need to have the map locked since we know that
	 * no one will mess with it until our reference is released.
	 */

	if (flags & PGO_LOCKED) {
		uvmfault_unlockall(ufi, NULL, &ubc_object.uobj, NULL);
		flags &= ~PGO_LOCKED;
	}

	va = ufi->orig_rvaddr;
	ubc_offset = va - (vaddr_t)ubc_object.kva;
	umap = &ubc_object.umap[ubc_offset >> ubc_winshift];
	KASSERT(umap->refcount != 0);
	KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	slot_offset = ubc_offset & (ubc_winsize - 1);

	/*
	 * some platforms cannot write to individual bytes atomically, so
	 * software has to do read/modify/write of larger quantities instead.
	 * this means that the access_type for "write" operations
	 * can be VM_PROT_READ, which confuses us mightily.
	 *
	 * deal with this by resetting access_type based on the info
	 * that ubc_alloc() stores for us.
	 */

	access_type = umap->writelen ? VM_PROT_WRITE : VM_PROT_READ;
	UVMHIST_LOG(ubchist, "va 0x%lx ubc_offset 0x%lx access_type %d",
	    va, ubc_offset, access_type, 0);

	if ((access_type & VM_PROT_WRITE) != 0) {
		if (slot_offset < trunc_page(umap->writeoff) ||
		    umap->writeoff + umap->writelen <= slot_offset) {
			panic("ubc_fault: out of range write");
		}
	}

	/* no umap locking needed since we have a ref on the umap */
	uobj = umap->uobj;

	if ((access_type & VM_PROT_WRITE) == 0) {
		npages = (ubc_winsize - slot_offset) >> PAGE_SHIFT;
	} else {
		npages = (round_page(umap->offset + umap->writeoff +
		    umap->writelen) - (umap->offset + slot_offset))
		    >> PAGE_SHIFT;
		flags |= PGO_PASTEOF;
	}

again:
	memset(pgs, 0, sizeof (pgs));
	mutex_enter(&uobj->vmobjlock);

	UVMHIST_LOG(ubchist, "slot_offset 0x%x writeoff 0x%x writelen 0x%x ",
	    slot_offset, umap->writeoff, umap->writelen, 0);
	UVMHIST_LOG(ubchist, "getpages uobj %p offset 0x%x npages %d",
	    uobj, umap->offset + slot_offset, npages, 0);

	error = (*uobj->pgops->pgo_get)(uobj, umap->offset + slot_offset, pgs,
	    &npages, 0, access_type, umap->advice, flags | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP);
	UVMHIST_LOG(ubchist, "getpages error %d npages %d", error, npages, 0,
	    0);

	if (error == EAGAIN) {
		kpause("ubc_fault", false, hz, NULL);
		goto again;
	}
	if (error) {
		return error;
	}

	va = ufi->orig_rvaddr;
	eva = ufi->orig_rvaddr + (npages << PAGE_SHIFT);

	UVMHIST_LOG(ubchist, "va 0x%lx eva 0x%lx", va, eva, 0, 0);
	for (i = 0; va < eva; i++, va += PAGE_SIZE) {
		bool rdonly;
		vm_prot_t mask;

		/*
		 * for virtually-indexed, virtually-tagged caches we should
		 * avoid creating writable mappings when we don't absolutely
		 * need them, since the "compatible alias" trick doesn't work
		 * on such caches.  otherwise, we can always map the pages
		 * writable.
		 */

#ifdef PMAP_CACHE_VIVT
		prot = VM_PROT_READ | access_type;
#else
		prot = VM_PROT_READ | VM_PROT_WRITE;
#endif
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i], 0, 0);
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}

		uobj = pg->uobject;
		mutex_enter(&uobj->vmobjlock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		KASSERT((pg->flags & PG_FAKE) == 0);
		if (pg->flags & PG_RELEASED) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			mutex_exit(&uobj->vmobjlock);
			continue;
		}
		if (pg->loan_count != 0) {

			/*
			 * avoid unneeded loan break if possible.
			 */

			if ((access_type & VM_PROT_WRITE) == 0)
				prot &= ~VM_PROT_WRITE;

			if (prot & VM_PROT_WRITE) {
				struct vm_page *newpg;

				newpg = uvm_loanbreak(pg);
				if (newpg == NULL) {
					uvm_page_unbusy(&pg, 1);
					mutex_exit(&uobj->vmobjlock);
					uvm_wait("ubc_loanbrk");
					continue; /* will re-fault */
				}
				pg = newpg;
			}
		}

		/*
		 * note that a page whose backing store is partially allocated
		 * is marked as PG_RDONLY.
		 */

		rdonly = ((access_type & VM_PROT_WRITE) == 0 &&
		    (pg->flags & PG_RDONLY) != 0) ||
		    UVM_OBJ_NEEDS_WRITEFAULT(uobj);
		KASSERT((pg->flags & PG_RDONLY) == 0 ||
		    (access_type & VM_PROT_WRITE) == 0 ||
		    pg->offset < umap->writeoff ||
		    pg->offset + PAGE_SIZE > umap->writeoff + umap->writelen);
		mask = rdonly ? ~VM_PROT_WRITE : VM_PROT_ALL;
		error = pmap_enter(ufi->orig_map->pmap, va, VM_PAGE_TO_PHYS(pg),
		    prot & mask, PMAP_CANFAIL | (access_type & mask));
		mutex_enter(&uvm_pageqlock);
		uvm_pageactivate(pg);
		mutex_exit(&uvm_pageqlock);
		pg->flags &= ~(PG_BUSY|PG_WANTED);
		UVM_PAGE_OWN(pg, NULL);
		mutex_exit(&uobj->vmobjlock);
		if (error) {
			UVMHIST_LOG(ubchist, "pmap_enter fail %d",
			    error, 0, 0, 0);
			uvm_wait("ubc_pmfail");
			/* will refault */
		}
	}
	pmap_update(ufi->orig_map->pmap);
	return 0;
}

static struct ubc_map *
ubc_find_mapping(struct uvm_object *uobj, voff_t offset)
{
	struct ubc_map *umap;

	LIST_FOREACH(umap, &ubc_object.hash[UBC_HASH(uobj, offset)], hash) {
		if (umap->uobj == uobj && umap->offset == offset) {
			return umap;
		}
	}
	return NULL;
}

/*
 * ubc interface functions
 */

/*
 * ubc_alloc: allocate a file mapping window
 */

void *
ubc_alloc(struct uvm_object *uobj, voff_t offset, vsize_t *lenp, int advice,
    int flags)
{
	vaddr_t slot_offset, va;
	struct ubc_map *umap;
	voff_t umap_offset;
	int error;
	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "uobj %p offset 0x%lx len 0x%lx",
	    uobj, offset, *lenp, 0);

	umap_offset = (offset & ~((voff_t)ubc_winsize - 1));
	slot_offset = (vaddr_t)(offset & ((voff_t)ubc_winsize - 1));
	*lenp = MIN(*lenp, ubc_winsize - slot_offset);

	/*
	 * the object is always locked here, so we don't need to add a ref.
	 */

again:
	mutex_enter(&ubc_object.uobj.vmobjlock);
	umap = ubc_find_mapping(uobj, umap_offset);
	if (umap == NULL) {
		UBC_EVCNT_INCR(wincachemiss);
		umap = TAILQ_FIRST(UBC_QUEUE(offset));
		if (umap == NULL) {
			mutex_exit(&ubc_object.uobj.vmobjlock);
			kpause("ubc_alloc", false, hz, NULL);
			goto again;
		}

		/*
		 * remove from old hash (if any), add to new hash.
		 */

		if (umap->uobj != NULL) {
			LIST_REMOVE(umap, hash);
		}
		umap->uobj = uobj;
		umap->offset = umap_offset;
		LIST_INSERT_HEAD(&ubc_object.hash[UBC_HASH(uobj, umap_offset)],
		    umap, hash);
		va = UBC_UMAP_ADDR(umap);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
			pmap_update(pmap_kernel());
		}
	} else {
		UBC_EVCNT_INCR(wincachehit);
		va = UBC_UMAP_ADDR(umap);
	}

	if (umap->refcount == 0) {
		TAILQ_REMOVE(UBC_QUEUE(offset), umap, inactive);
	}

	if ((flags & UBC_WRITE) && (umap->writeoff || umap->writelen)) {
		panic("ubc_alloc: concurrent writes uobj %p", uobj);
	}
	if (flags & UBC_WRITE) {
		umap->writeoff = slot_offset;
		umap->writelen = *lenp;
	}

	umap->refcount++;
	umap->advice = advice;
	mutex_exit(&ubc_object.uobj.vmobjlock);
	UVMHIST_LOG(ubchist, "umap %p refs %d va %p flags 0x%x",
	    umap, umap->refcount, va, flags);

	if (flags & UBC_FAULTBUSY) {
		int npages = (*lenp + PAGE_SIZE - 1) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		int gpflags =
		    PGO_SYNCIO|PGO_OVERWRITE|PGO_PASTEOF|PGO_NOBLOCKALLOC|
		    PGO_NOTIMESTAMP;
		int i;
		KDASSERT(flags & UBC_WRITE);
		KASSERT(umap->refcount == 1);

		UBC_EVCNT_INCR(faultbusy);
		if (umap->flags & UMAP_MAPPING_CACHED) {
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_remove(pmap_kernel(), va, va + ubc_winsize);
		}
again_faultbusy:
		memset(pgs, 0, sizeof(pgs));
		mutex_enter(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj, trunc_page(offset), pgs,
		    &npages, 0, VM_PROT_READ | VM_PROT_WRITE, advice, gpflags);
		UVMHIST_LOG(ubchist, "faultbusy getpages %d", error, 0, 0, 0);
		if (error) {
			goto out;
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			KASSERT(pg->uobject == uobj);
			if (pg->loan_count != 0) {
				mutex_enter(&uobj->vmobjlock);
				if (pg->loan_count != 0) {
					pg = uvm_loanbreak(pg);
				}
				mutex_exit(&uobj->vmobjlock);
				if (pg == NULL) {
					pmap_kremove(va, ubc_winsize);
					pmap_update(pmap_kernel());
					mutex_enter(&uobj->vmobjlock);
					uvm_page_unbusy(pgs, npages);
					mutex_exit(&uobj->vmobjlock);
					uvm_wait("ubc_alloc");
					goto again_faultbusy;
				}
				pgs[i] = pg;
			}
			pmap_kenter_pa(va + slot_offset + (i << PAGE_SHIFT),
			    VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
		pmap_update(pmap_kernel());
		umap->flags |= UMAP_PAGES_LOCKED;
	} else {
		KASSERT((umap->flags & UMAP_PAGES_LOCKED) == 0);
	}

out:
	return (void *)(va + slot_offset);
}

/*
 * ubc_release: free a file mapping window.
 */

void
ubc_release(void *va, int flags)
{
	struct ubc_map *umap;
	struct uvm_object *uobj;
	vaddr_t umapva;
	bool unmapped;
	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "va %p", va, 0, 0, 0);
	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
	umapva = UBC_UMAP_ADDR(umap);
	uobj = umap->uobj;
	KASSERT(uobj != NULL);

	if (umap->flags & UMAP_PAGES_LOCKED) {
		int slot_offset = umap->writeoff;
		int endoff = umap->writeoff + umap->writelen;
		int zerolen = round_page(endoff) - endoff;
		int npages = (int)(round_page(umap->writeoff + umap->writelen)
				   - trunc_page(umap->writeoff)) >> PAGE_SHIFT;
		struct vm_page *pgs[npages];
		paddr_t pa;
		int i;
		bool rv;

		KASSERT((umap->flags & UMAP_MAPPING_CACHED) == 0);

		memset((char *)umapva + endoff, 0, zerolen);
		umap->flags &= ~UMAP_PAGES_LOCKED;
		mutex_enter(&uvm_pageqlock);
		for (i = 0; i < npages; i++) {
			rv = pmap_extract(pmap_kernel(),
			    umapva + slot_offset + (i << PAGE_SHIFT), &pa);
			KASSERT(rv);
			pgs[i] = PHYS_TO_VM_PAGE(pa);
			pgs[i]->flags &= ~(PG_FAKE|PG_CLEAN);
			KASSERT(pgs[i]->loan_count == 0);
			uvm_pageactivate(pgs[i]);
		}
		mutex_exit(&uvm_pageqlock);
		pmap_kremove(umapva, ubc_winsize);
		pmap_update(pmap_kernel());
		mutex_enter(&uobj->vmobjlock);
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uobj->vmobjlock);
		unmapped = true;
	} else {
		unmapped = false;
	}

	mutex_enter(&ubc_object.uobj.vmobjlock);
	umap->writeoff = 0;
	umap->writelen = 0;
	umap->refcount--;
	if (umap->refcount == 0) {
		if (flags & UBC_UNMAP) {

			/*
			 * Invalidate any cached mappings if requested.
			 * This is typically used to avoid leaving
			 * incompatible cache aliases around indefinitely.
			 */

			pmap_remove(pmap_kernel(), umapva,
				    umapva + ubc_winsize);
			umap->flags &= ~UMAP_MAPPING_CACHED;
			pmap_update(pmap_kernel());
			LIST_REMOVE(umap, hash);
			umap->uobj = NULL;
			TAILQ_INSERT_HEAD(UBC_QUEUE(umap->offset), umap,
			    inactive);
		} else {
			if (!unmapped) {
				umap->flags |= UMAP_MAPPING_CACHED;
			}
			TAILQ_INSERT_TAIL(UBC_QUEUE(umap->offset), umap,
			    inactive);
		}
	}
	UVMHIST_LOG(ubchist, "umap %p refs %d", umap, umap->refcount, 0, 0);
	mutex_exit(&ubc_object.uobj.vmobjlock);
}

/*
 * ubc_uiomove: move data to/from an object.
 */

int
ubc_uiomove(struct uvm_object *uobj, struct uio *uio, vsize_t todo, int advice,
    int flags)
{
	voff_t off;
	const bool overwrite = (flags & UBC_FAULTBUSY) != 0;
	int error;

	KASSERT(todo <= uio->uio_resid);
	KASSERT(((flags & UBC_WRITE) != 0 && uio->uio_rw == UIO_WRITE) ||
	    ((flags & UBC_READ) != 0 && uio->uio_rw == UIO_READ));

	off = uio->uio_offset;
	error = 0;
	while (todo > 0) {
		vsize_t bytelen = todo;
		void *win;

		win = ubc_alloc(uobj, off, &bytelen, advice, flags);
		if (error == 0) {
			error = uiomove(win, bytelen, uio);
		}
		if (error != 0 && overwrite) {

			/*
			 * if we haven't initialized the pages yet,
			 * do it now.  it's safe to use memset here
			 * because we just mapped the pages above.
			 */

			printf("%s: error=%d\n", __func__, error);
			memset(win, 0, bytelen);
		}
		ubc_release(win, flags);
		off += bytelen;
		todo -= bytelen;
		if (error != 0 && (flags & UBC_PARTIALOK) != 0) {
			break;
		}
	}

	return error;
}

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
		    UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);

		off += bytelen;
		len -= bytelen;
	}
}