/*	$NetBSD: uvm_loan.c,v 1.73 2008/12/03 14:46:24 pooka Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
 */

/*
 * uvm_loan.c: page loanout handler
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.73 2008/12/03 14:46:24 pooka Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

#include <uvm/uvm.h>

54 * "loaned" pages are pages which are (read-only, copy-on-write) loaned
55 * from the VM system to other parts of the kernel. this allows page
56 * copying to be avoided (e.g. you can loan pages from objs/anons to
59 * there are 3 types of loans possible:
60 * O->K uvm_object page to wired kernel page (e.g. mbuf data area)
61 * A->K anon page to wired kernel page (e.g. mbuf data area)
62 * O->A uvm_object to anon loan (e.g. vnode page to an anon)
63 * note that it possible to have an O page loaned to both an A and K
66 * loans are tracked by pg->loan_count. an O->A page will have both
67 * a uvm_object and a vm_anon, but PQ_ANON will not be set. this sort
68 * of page is considered "owned" by the uvm_object (not the anon).
70 * each loan of a page to the kernel bumps the pg->wire_count. the
71 * kernel mappings for these pages will be read-only and wired. since
72 * the page will also be wired, it will not be a candidate for pageout,
73 * and thus will never be pmap_page_protect()'d with VM_PROT_NONE. a
74 * write fault in the kernel to one of these pages will not cause
75 * copy-on-write. instead, the page fault is considered fatal. this
76 * is because the kernel mapping will have no way to look up the
77 * object/anon which the page is owned by. this is a good side-effect,
78 * since a kernel write to a loaned page is an error.
80 * owners that want to free their pages and discover that they are
81 * loaned out simply "disown" them (the page becomes an orphan). these
82 * pages should be freed when the last loan is dropped. in some cases
83 * an anon may "adopt" an orphaned page.
85 * locking: to read pg->loan_count either the owner or the page queues
86 * must be locked. to modify pg->loan_count, both the owner of the page
87 * and the PQs must be locked. pg->flags is (as always) locked by
88 * the owner of the page.
90 * note that locking from the "loaned" side is tricky since the object
91 * getting the loaned page has no reference to the page's owner and thus
92 * the owner could "die" at any time. in order to prevent the owner
93 * from dying the page queues should be locked. this forces us to sometimes
96 * loans are typically broken by the following events:
97 * 1. user-level xwrite fault to a loaned page
98 * 2. pageout of clean+inactive O->A loaned page
99 * 3. owner frees page (e.g. pager flush)
101 * note that loaning a page causes all mappings of the page to become
102 * read-only (via pmap_page_protect). this could have an unexpected
103 * effect on normal "wired" pages if one is not careful (XXX).
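/*
 * Illustrative sketch (not part of the original file): one way a reader
 * on the "loaned" side can inspect pg->loan_count under the locking
 * rules described above.  The helper name is hypothetical; only the
 * lock usage follows the rules in the preceding comment.
 */
#if 0
static bool
example_page_is_loaned(struct vm_page *pg)
{
	bool loaned;

	/*
	 * reading pg->loan_count requires either the owner's lock or
	 * the page queue lock.  a reader on the "loaned" side may not
	 * know the owner (it could die at any time), so take the page
	 * queue lock instead.
	 */
	mutex_enter(&uvm_pageqlock);
	loaned = (pg->loan_count > 0);
	mutex_exit(&uvm_pageqlock);
	return loaned;
}
#endif
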
/*
 * local prototypes
 */

static int	uvm_loananon(struct uvm_faultinfo *, void ***,
		    int, struct vm_anon *);
static int	uvm_loanuobj(struct uvm_faultinfo *, void ***,
		    int, vaddr_t);
static int	uvm_loanzero(struct uvm_faultinfo *, void ***, int);
static void	uvm_unloananon(struct vm_anon **, int);
static void	uvm_unloanpage(struct vm_page **, int);
static int	uvm_loanpage(struct vm_page **, int);

/*
 * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
 *
 * => "ufi" is the result of a successful map lookup (meaning that
 *	on entry the map is locked by the caller)
 * => we may unlock and then relock the map if needed (for I/O)
 * => we put our output result in "output"
 * => we always return with the map unlocked
 * => possible return values:
 *	-1 == error, map is unlocked
 *	 0 == map relock error (try again!), map is unlocked
 *	>0 == number of pages we loaned, map is unlocked
 *
 * NOTE: We can live with this being an inline, because it is only called
 * from one place.
 */

static inline int
uvm_loanentry(struct uvm_faultinfo *ufi, void ***output, int flags)
{
	vaddr_t curaddr = ufi->orig_rvaddr;
	vsize_t togo = ufi->size;
	struct vm_aref *aref = &ufi->entry->aref;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_anon *anon;
	int rv, result = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * lock us the rest of the way down (we unlock before return)
	 */
	if (aref->ar_amap)
		amap_lock(aref->ar_amap);

	/*
	 * loop until done
	 */
	while (togo) {

		/*
		 * find the page we want.  check the anon layer first.
		 */

		if (aref->ar_amap) {
			anon = amap_lookup(aref, curaddr - ufi->entry->start);
		} else {
			anon = NULL;
		}

		/* locked: map, amap, uobj */
		if (anon) {
			rv = uvm_loananon(ufi, output, flags, anon);
		} else if (uobj) {
			rv = uvm_loanuobj(ufi, output, flags, curaddr);
		} else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
			rv = uvm_loanzero(ufi, output, flags);
		} else {
			uvmfault_unlockall(ufi, aref->ar_amap, uobj, NULL);
			rv = -1;
		}

		/* locked: if (rv > 0) => map, amap, uobj [o.w. unlocked] */
		KASSERT(rv > 0 || aref->ar_amap == NULL ||
		    !mutex_owned(&aref->ar_amap->am_l));
		KASSERT(rv > 0 || uobj == NULL ||
		    !mutex_owned(&uobj->vmobjlock));

		/* total failure */
		if (rv < 0) {
			UVMHIST_LOG(loanhist, "failure %d", rv, 0,0,0);
			return (-1);
		}

		/* relock failed, need to do another lookup */
		if (rv == 0) {
			UVMHIST_LOG(loanhist, "relock failure %d", result,
			    0,0,0);
			return (result);
		}

		/*
		 * got it... advance to next page
		 */

		result++;
		togo -= PAGE_SIZE;
		curaddr += PAGE_SIZE;
	}

	/*
	 * unlock what we locked, unlock the maps and return
	 */

	if (aref->ar_amap)
		amap_unlock(aref->ar_amap);
	uvmfault_unlockmaps(ufi, false);
	UVMHIST_LOG(loanhist, "done %d", result, 0,0,0);
	return (result);
}

/*
 * uvm_loan: loan pages in a map out to anons or to the kernel
 *
 * => map should be unlocked
 * => start and len should be multiples of PAGE_SIZE
 * => result is either an array of anon's or vm_pages (depending on flags)
 * => flag values: UVM_LOAN_TOANON - loan to anons
 *                 UVM_LOAN_TOPAGE - loan to wired kernel page
 *    one and only one of these flags must be set!
 * => returns 0 (success), or an appropriate error number
 */

int
uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
{
	struct uvm_faultinfo ufi;
	void **result, **output;
	int rv, error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * ensure that one and only one of the flags is set
	 */

	KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
		((flags & UVM_LOAN_TOPAGE) == 0));
	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);

	/*
	 * "output" is a pointer to the current place to put the loaned page.
	 */

	result = v;
	output = &result[0];	/* start at the beginning ... */

	/*
	 * while we've got pages to do
	 */

	while (len > 0) {

		/*
		 * fill in params for a call to uvmfault_lookup
		 */

		ufi.orig_map = map;
		ufi.orig_rvaddr = start;
		ufi.orig_size = len;

		/*
		 * do the lookup, the only time this will fail is if we hit on
		 * an unmapped region (an error)
		 */

		if (!uvmfault_lookup(&ufi, false)) {
			error = ENOENT;
			goto fail;
		}

		/*
		 * map now locked.  now do the loanout...
		 */

		rv = uvm_loanentry(&ufi, &output, flags);
		if (rv < 0) {
			/* all unlocked due to error */
			error = EINVAL;
			goto fail;
		}

		/*
		 * done!  the map is unlocked.  advance, if possible.
		 *
		 * XXXCDC: could be recoded to hold the map lock with
		 * smarter code (but it only happens on map entry
		 * boundaries, so it isn't that bad).
		 */

		if (rv) {
			rv <<= PAGE_SHIFT;
			len -= rv;
			start += rv;
		}
	}
	UVMHIST_LOG(loanhist, "success", 0,0,0,0);
	return 0;

fail:
	/*
	 * failed to complete loans.  drop any loans and return failure code.
	 * map is already unlocked.
	 */

	if (output - result) {
		if (flags & UVM_LOAN_TOANON) {
			uvm_unloananon((struct vm_anon **)result,
			    output - result);
		} else {
			uvm_unloanpage((struct vm_page **)result,
			    output - result);
		}
	}
	UVMHIST_LOG(loanhist, "error %d", error,0,0,0);
	return (error);
}

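/*
 * Illustrative sketch (not part of the original file): the expected
 * calling convention of uvm_loan()/uvm_unloan() for a ->K style loan
 * of a single user page.  The helper and its caller are hypothetical;
 * "map" would typically be the target process's map, e.g.
 * &curproc->p_vmspace->vm_map.
 */
#if 0
static int
example_loan_one_page(struct vm_map *map, vaddr_t uva)
{
	struct vm_page *pgs[1];
	int error;

	/* start and len must be multiples of PAGE_SIZE */
	error = uvm_loan(map, trunc_page(uva), PAGE_SIZE, pgs,
	    UVM_LOAN_TOPAGE);
	if (error)
		return error;

	/* pgs[0] is now wired and must only be mapped read-only */

	uvm_unloan(pgs, 1, UVM_LOAN_TOPAGE);
	return 0;
}
#endif
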
/*
 * uvm_loananon: loan a page from an anon out
 *
 * => called with map, amap, uobj locked
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */

static int
uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
    struct vm_anon *anon)
{
	struct vm_page *pg;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * if we are loaning to "another" anon then it is easy, we just
	 * bump the reference count on the current anon and return a
	 * pointer to it (it becomes copy-on-write shared).
	 */

	if (flags & UVM_LOAN_TOANON) {
		mutex_enter(&anon->an_lock);
		pg = anon->an_page;
		if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) {
			if (pg->wire_count > 0) {
				UVMHIST_LOG(loanhist, "->A wired %p", pg,0,0,0);
				uvmfault_unlockall(ufi,
				    ufi->entry->aref.ar_amap,
				    ufi->entry->object.uvm_obj, anon);
				return (-1);
			}
			pmap_page_protect(pg, VM_PROT_READ);
		}
		anon->an_ref++;
		**output = anon;
		(*output)++;
		mutex_exit(&anon->an_lock);
		UVMHIST_LOG(loanhist, "->A done", 0,0,0,0);
		return (1);
	}

	/*
	 * we are loaning to a kernel-page.  we need to get the page
	 * resident so we can wire it.  uvmfault_anonget will handle
	 * this for us.
	 */

	mutex_enter(&anon->an_lock);
	error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

	/*
	 * if we were unable to get the anon, then uvmfault_anonget has
	 * unlocked everything and returned an error code.
	 */

	if (error) {
		UVMHIST_LOG(loanhist, "error %d", error,0,0,0);

		/* need to refault (i.e. refresh our lookup) ? */
		if (error == ERESTART) {
			return (0);
		}

		/* "try again"?  sleep a bit and retry ... */
		if (error == EAGAIN) {
			kpause("loanagain", false, hz/2, NULL);
			return (0);
		}

		/* otherwise flag it as an error */
		return (-1);
	}

	/*
	 * we have the page and its owner locked: do the loan now.
	 */

	pg = anon->an_page;
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count > 0) {
		mutex_exit(&uvm_pageqlock);
		UVMHIST_LOG(loanhist, "->K wired %p", pg,0,0,0);
		KASSERT(pg->uobject == NULL);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
		    NULL, anon);
		return (-1);
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	**output = pg;
	(*output)++;

	/* unlock anon and return success */

	if (pg->uobject)
		mutex_exit(&pg->uobject->vmobjlock);
	mutex_exit(&anon->an_lock);
	UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
	return (1);
}

/*
 * uvm_loanpage: loan out pages to kernel (->K)
 *
 * => pages should be object-owned and the object should be locked.
 * => in the case of error, the object might be unlocked and relocked.
 * => caller should busy the pages beforehand.
 * => pages will be unbusied.
 * => fail with EBUSY if we meet a wired page.
 */

static int
uvm_loanpage(struct vm_page **pgpp, int npages)
{
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgpp[i];

		KASSERT(pg->uobject != NULL);
		KASSERT(pg->uobject == pgpp[0]->uobject);
		KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
		KASSERT(mutex_owned(&pg->uobject->vmobjlock));
		KASSERT(pg->flags & PG_BUSY);

		mutex_enter(&uvm_pageqlock);
		if (pg->wire_count > 0) {
			mutex_exit(&uvm_pageqlock);
			UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
			error = EBUSY;
			break;
		}
		if (pg->loan_count == 0) {
			pmap_page_protect(pg, VM_PROT_READ);
		}
		pg->loan_count++;
		uvm_pageactivate(pg);
		mutex_exit(&uvm_pageqlock);
	}

	uvm_page_unbusy(pgpp, npages);

	if (error) {
		/*
		 * backout what we've done
		 */
		kmutex_t *slock = &pgpp[0]->uobject->vmobjlock;

		mutex_exit(slock);
		uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
		mutex_enter(slock);
	}

	UVMHIST_LOG(loanhist, "done %d", error,0,0,0);
	return error;
}

/*
 * number of pages to get at once.
 * should be <= MAX_READ_AHEAD in genfs_vnops.c
 */
#define	UVM_LOAN_GET_CHUNK	16

/*
 * uvm_loanuobjpages: loan pages from a uobj out (O->K)
 *
 * => uobj shouldn't be locked.  (we'll lock it)
 * => fail with EBUSY if we meet a wired page.
 */

int
uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    struct vm_page **origpgpp)
{
	int ndone;	/* # of pages loaned out */
	struct vm_page **pgpp;
	int error;
	int i;
	kmutex_t *slock;

	pgpp = origpgpp;
	for (ndone = 0; ndone < orignpages; ) {
		int npages;
		/* npendloan: # of pages busied but not loaned out yet. */
		int npendloan = 0xdead; /* XXX gcc */
reget:
		npages = MIN(UVM_LOAN_GET_CHUNK, orignpages - ndone);
		mutex_enter(&uobj->vmobjlock);
		error = (*uobj->pgops->pgo_get)(uobj,
		    pgoff + (ndone << PAGE_SHIFT), pgpp, &npages, 0,
		    VM_PROT_READ, 0, PGO_SYNCIO);
		if (error == EAGAIN) {
			kpause("loanuopg", false, hz/2, NULL);
			continue;
		}
		if (error)
			goto fail;

		KASSERT(npages > 0);

		/* loan and unbusy pages */
		slock = NULL;
		for (i = 0; i < npages; i++) {
			kmutex_t *nextslock; /* slock for next page */
			struct vm_page *pg = *pgpp;

			/* XXX assuming that the page is owned by uobj */
			KASSERT(pg->uobject != NULL);
			nextslock = &pg->uobject->vmobjlock;

			if (slock != nextslock) {
				if (slock) {
					KASSERT(npendloan > 0);
					error = uvm_loanpage(pgpp - npendloan,
					    npendloan);
					mutex_exit(slock);
					if (error)
						goto fail;
					ndone += npendloan;
					KASSERT(origpgpp + ndone == pgpp);
				}
				slock = nextslock;
				npendloan = 0;
				mutex_enter(slock);
			}

			if ((pg->flags & PG_RELEASED) != 0) {
				/*
				 * release pages and try again.
				 */
				mutex_exit(slock);
				for (; i < npages; i++) {
					pg = pgpp[i];
					slock = &pg->uobject->vmobjlock;

					mutex_enter(slock);
					mutex_enter(&uvm_pageqlock);
					uvm_page_unbusy(&pg, 1);
					mutex_exit(&uvm_pageqlock);
					mutex_exit(slock);
				}
				goto reget;
			}

			npendloan++;
			pgpp++;
			KASSERT(origpgpp + ndone + npendloan == pgpp);
		}
		KASSERT(slock != NULL);
		KASSERT(npendloan > 0);
		error = uvm_loanpage(pgpp - npendloan, npendloan);
		mutex_exit(slock);
		if (error)
			goto fail;
		ndone += npendloan;
		KASSERT(origpgpp + ndone == pgpp);
	}

	return 0;

fail:
	uvm_unloan(origpgpp, ndone, UVM_LOAN_TOPAGE);

	return error;
}

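/*
 * Illustrative sketch (not part of the original file): how a zero-copy
 * consumer might loan the pages backing a page-aligned vnode range via
 * uvm_loanuobjpages() and later drop the loan with uvm_unloan().  The
 * helper is hypothetical; the &vp->v_uobj usage reflects that UVM
 * treats a vnode as a uvm_object.
 */
#if 0
static int
example_loan_vnode_range(struct vnode *vp, voff_t off, int npages,
    struct vm_page **pgpp)
{
	int error;

	/* the uobj must be unlocked on entry; off must be page aligned */
	error = uvm_loanuobjpages(&vp->v_uobj, off, npages, pgpp);
	if (error)
		return error;

	/* the pages are now wired, read-only kernel loans */

	uvm_unloan(pgpp, npages, UVM_LOAN_TOPAGE);
	return 0;
}
#endif
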
/*
 * uvm_loanuobj: loan a page from a uobj out
 *
 * => called with map, amap, uobj locked
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */

static int
uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_page *pg;
	struct vm_anon *anon;
	int error, npages;
	bool locked;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * first we must make sure the page is resident.
	 *
	 * XXXCDC: duplicate code with uvm_fault().
	 */

	mutex_enter(&uobj->vmobjlock);
	if (uobj->pgops->pgo_get) {	/* try locked pgo_get */
		npages = 1;
		pg = NULL;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	} else {
		error = EIO;		/* must have pgo_get op */
	}

	/*
	 * check the result of the locked pgo_get.  if there is a problem,
	 * then we fail the loan.
	 */

	if (error && error != EBUSY) {
		uvmfault_unlockall(ufi, amap, uobj, NULL);
		return (-1);
	}

	/*
	 * if we need to unlock for I/O, do so now.
	 */

	if (error == EBUSY) {
		uvmfault_unlockall(ufi, amap, NULL, NULL);

		/* locked: uobj */
		npages = 1;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
		/* locked: <nothing> */

		if (error) {
			if (error == EAGAIN) {
				kpause("fltagain2", false, hz/2, NULL);
				return (0);
			}
			return (-1);
		}

		/*
		 * pgo_get was a success.  attempt to relock everything.
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap)
			amap_lock(amap);
		uobj = pg->uobject;
		mutex_enter(&uobj->vmobjlock);

		/*
		 * verify that the page has not been released and re-verify
		 * that amap slot is still free.  if there is a problem we
		 * drop our lock (thus force a lookup refresh/retry).
		 */

		if ((pg->flags & PG_RELEASED) != 0 ||
		    (locked && amap && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start))) {
			if (locked)
				uvmfault_unlockall(ufi, amap, NULL, NULL);
			locked = false;
		}

		/*
		 * didn't get the lock?  release the page and retry.
		 */

		if (locked == false) {
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_RELEASED) {
				mutex_enter(&uvm_pageqlock);
				uvm_pagefree(pg);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(&uobj->vmobjlock);
				return (0);
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pageactivate(pg);
			mutex_exit(&uvm_pageqlock);
			pg->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(pg, NULL);
			mutex_exit(&uobj->vmobjlock);
			return (0);
		}
	}

	KASSERT(uobj == pg->uobject);

	/*
	 * at this point we have the page we want ("pg") marked PG_BUSY for us
	 * and we have all data structures locked.  do the loanout.  page can
	 * not be PG_RELEASED (we caught this above).
	 */

	if ((flags & UVM_LOAN_TOANON) == 0) {
		if (uvm_loanpage(&pg, 1)) {
			uvmfault_unlockall(ufi, amap, uobj, NULL);
			return (-1);
		}
		mutex_exit(&uobj->vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

	/*
	 * must be a loan to an anon.  check to see if there is already
	 * an anon associated with this page.  if so, then just return
	 * a reference to this object.  the page should already be
	 * mapped read-only because it is already on loan.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		mutex_enter(&anon->an_lock);
		anon->an_ref++;
		mutex_exit(&anon->an_lock);
		if (pg->flags & PG_WANTED) {
			wakeup(pg);
		}
		pg->flags &= ~(PG_WANTED|PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);
		mutex_exit(&uobj->vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		goto fail;
	}
	anon->an_page = pg;
	pg->uanon = anon;
	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count > 0) {
		mutex_exit(&uvm_pageqlock);
		UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
		pg->uanon = NULL;
		anon->an_page = NULL;
		anon->an_ref--;
		mutex_exit(&anon->an_lock);
		uvm_anfree(anon);
		goto fail;
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	mutex_exit(&uobj->vmobjlock);
	mutex_exit(&anon->an_lock);
	**output = anon;
	(*output)++;
	return (1);

fail:
	UVMHIST_LOG(loanhist, "fail", 0,0,0,0);
	/*
	 * unlock everything and bail out.
	 */
	if (pg->flags & PG_WANTED) {
		wakeup(pg);
	}
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(ufi, amap, uobj, NULL);
	return (-1);
}

/*
 * uvm_loanzero: loan a zero-fill page out
 *
 * => called with map, amap, uobj locked
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */

static struct uvm_object uvm_loanzero_object;

static int
uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags)
{
	struct vm_anon *anon;
	struct vm_page *pg;
	struct vm_amap *amap = ufi->entry->aref.ar_amap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
again:
	mutex_enter(&uvm_loanzero_object.vmobjlock);

	/*
	 * first, get ahold of our single zero page.
	 */

	if (__predict_false((pg =
	    TAILQ_FIRST(&uvm_loanzero_object.memq)) == NULL)) {
		while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
		    UVM_PGA_ZERO)) == NULL) {
			mutex_exit(&uvm_loanzero_object.vmobjlock);
			uvmfault_unlockall(ufi, amap, NULL, NULL);
			uvm_wait("loanzero");
			if (!uvmfault_relock(ufi)) {
				return (0);
			}
			if (amap) {
				amap_lock(amap);
			}
			goto again;
		}

		/* got a zero'd page. */
		pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
		pg->flags |= PG_RDONLY;
		mutex_enter(&uvm_pageqlock);
		uvm_pageactivate(pg);
		mutex_exit(&uvm_pageqlock);
		UVM_PAGE_OWN(pg, NULL);
	}

	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel-page */
		mutex_enter(&uvm_pageqlock);
		pg->loan_count++;
		mutex_exit(&uvm_pageqlock);
		mutex_exit(&uvm_loanzero_object.vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

	/*
	 * loaning to an anon.  check to see if there is already an anon
	 * associated with this page.  if so, then just return a reference
	 * to this object.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		mutex_enter(&anon->an_lock);
		anon->an_ref++;
		mutex_exit(&anon->an_lock);
		mutex_exit(&uvm_loanzero_object.vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		/* out of swap causes us to fail */
		mutex_exit(&uvm_loanzero_object.vmobjlock);
		uvmfault_unlockall(ufi, amap, NULL, NULL);
		return (-1);
	}
	anon->an_page = pg;
	pg->uanon = anon;
	mutex_enter(&uvm_pageqlock);
	pg->loan_count++;
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&anon->an_lock);
	mutex_exit(&uvm_loanzero_object.vmobjlock);
	**output = anon;
	(*output)++;
	return (1);
}

/*
 * uvm_unloananon: kill loans on anons (basically a normal ref drop)
 *
 * => we expect all our resources to be unlocked
 */

static void
uvm_unloananon(struct vm_anon **aloans, int nanons)
{
	struct vm_anon *anon;

	while (nanons-- > 0) {
		int refs;

		anon = *aloans++;
		mutex_enter(&anon->an_lock);
		refs = --anon->an_ref;
		mutex_exit(&anon->an_lock);

		if (refs == 0) {
			uvm_anfree(anon);
		}
	}
}

/*
 * uvm_unloanpage: kill loans on pages loaned out to the kernel
 *
 * => we expect all our resources to be unlocked
 */

static void
uvm_unloanpage(struct vm_page **ploans, int npages)
{
	struct vm_page *pg;
	kmutex_t *slock;

	mutex_enter(&uvm_pageqlock);
	while (npages-- > 0) {
		pg = *ploans++;

		/*
		 * do a little dance to acquire the object or anon lock
		 * as appropriate.  we are locking in the wrong order,
		 * so we have to do a try-lock here.
		 */

		slock = NULL;
		while (pg->uobject != NULL || pg->uanon != NULL) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			if (mutex_tryenter(slock)) {
				break;
			}
			mutex_exit(&uvm_pageqlock);
			/* XXX Better than yielding but inadequate. */
			kpause("livelock", false, 1, NULL);
			mutex_enter(&uvm_pageqlock);
			slock = NULL;
		}

		/*
		 * drop our loan.  if page is owned by an anon but
		 * PQ_ANON is not set, the page was loaned to the anon
		 * from an object which dropped ownership, so resolve
		 * this by turning the anon's loan into real ownership
		 * (ie. decrement loan_count again and set PQ_ANON).
		 * after all this, if there are no loans left, put the
		 * page back on a paging queue (if the page is owned by
		 * an anon) or free it (if the page is now unowned).
		 */

		KASSERT(pg->loan_count > 0);
		pg->loan_count--;
		if (pg->uobject == NULL && pg->uanon != NULL &&
		    (pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
		}
		if (pg->loan_count == 0 && pg->uobject == NULL &&
		    pg->uanon == NULL) {
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
		if (slock != NULL) {
			mutex_exit(slock);
		}
	}
	mutex_exit(&uvm_pageqlock);
}

/*
 * uvm_unloan: kill loans on pages or anons.
 */

void
uvm_unloan(void *v, int npages, int flags)
{
	if (flags & UVM_LOAN_TOANON) {
		uvm_unloananon(v, npages);
	} else {
		uvm_unloanpage(v, npages);
	}
}

/*
 * Minimal pager for uvm_loanzero_object.  We need to provide a "put"
 * method, because the page can end up on a paging queue, and the
 * page daemon will want to call pgo_put when it encounters the page
 * on the inactive list.
 */

static int
ulz_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	KDASSERT(uobj == &uvm_loanzero_object);

	/*
	 * Don't need to do any work here if we're not freeing pages.
	 */

	if ((flags & PGO_FREE) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * we don't actually want to ever free the uvm_loanzero_page, so
	 * just reactivate or dequeue it.
	 */

	pg = TAILQ_FIRST(&uobj->memq);
	KASSERT(pg != NULL);
	KASSERT(TAILQ_NEXT(pg, listq.queue) == NULL);

	mutex_enter(&uvm_pageqlock);
	if (pg->uanon)
		uvm_pageactivate(pg);
	else
		uvm_pagedequeue(pg);
	mutex_exit(&uvm_pageqlock);

	mutex_exit(&uobj->vmobjlock);
	return 0;
}

static const struct uvm_pagerops ulz_pager = {
	.pgo_put = ulz_put,
};

/*
 * uvm_loan_init(): initialize the uvm_loan() facility.
 */

void
uvm_loan_init(void)
{

	UVM_OBJ_INIT(&uvm_loanzero_object, &ulz_pager, 0);

	UVMHIST_INIT(loanhist, 300);
}

/*
 * uvm_loanbreak: break loan on a uobj page
 *
 * => called with uobj locked
 * => the page should be busy
 * => return value:
 *	newly allocated page if succeeded
 */

struct vm_page *
uvm_loanbreak(struct vm_page *uobjpage)
{
	struct vm_page *pg;
	struct uvm_object *uobj = uobjpage->uobject;

	KASSERT(uobj != NULL);
	KASSERT(mutex_owned(&uobj->vmobjlock));
	KASSERT(uobjpage->flags & PG_BUSY);

	/* alloc new un-owned page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		return NULL;

	/*
	 * copy the data from the old page to the new
	 * one and clear the fake flags on the new page (keep it busy).
	 * force a reload of the old page by clearing it from all
	 * pmaps.
	 * transfer dirtiness of the old page to the new page.
	 * then lock the page queues to rename the pages.
	 */

	uvm_pagecopy(uobjpage, pg);	/* old -> new */
	pg->flags &= ~PG_FAKE;
	pmap_page_protect(uobjpage, VM_PROT_NONE);
	if ((uobjpage->flags & PG_CLEAN) != 0 && !pmap_clear_modify(uobjpage)) {
		pmap_clear_modify(pg);
		pg->flags |= PG_CLEAN;
	} else {
		/* uvm_pagecopy marked it dirty */
		KASSERT((pg->flags & PG_CLEAN) == 0);
		/* an object with a dirty page should be dirty. */
		KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
	}
	if (uobjpage->flags & PG_WANTED)
		wakeup(uobjpage);
	/* uobj still locked */
	uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(uobjpage, NULL);

	mutex_enter(&uvm_pageqlock);

	/*
	 * replace uobjpage with new page.
	 */

	uvm_pagereplace(uobjpage, pg);

	/*
	 * if the page is no longer referenced by
	 * an anon (i.e. we are breaking an O->K
	 * loan), then remove it from any pageq's.
	 */
	if (uobjpage->uanon == NULL)
		uvm_pagedequeue(uobjpage);

	/*
	 * at this point we have absolutely no
	 * control over uobjpage
	 */

	/* install new page */
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	/*
	 * done!  loan is broken and "pg" is
	 * PG_BUSY.  it can now replace uobjpage.
	 */

	return pg;
}