No empty .Rs/.Re
[netbsd-mini2440.git] / sys / uvm / uvm_object.c
blob5b2a9fa042208d826e331f45afc774c495139756
/*	$NetBSD: uvm_object.c,v 1.6 2008/04/28 20:24:12 martin Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * uvm_object.c: operate with memory objects
 *
 * TODO:
 *  1. Support PG_RELEASED-using objects
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.6 2008/04/28 20:24:12 martin Exp $");

#include "opt_ddb.h"
#include "opt_uvmhist.h"

#include <sys/param.h>

#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

/* We will fetch this page count per step */
#define	FETCH_PAGECOUNT	16
55 * uobj_wirepages: wire the pages of entire uobj
57 * => NOTE: this function should only be used for types of objects
58 * where PG_RELEASED flag is never set (aobj objects)
59 * => caller must pass page-aligned start and end values
62 int
63 uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end)
65 int i, npages, error;
66 struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
67 off_t offset = start, left;
69 left = (end - start) >> PAGE_SHIFT;
71 mutex_enter(&uobj->vmobjlock);
72 while (left) {
74 npages = MIN(FETCH_PAGECOUNT, left);
76 /* Get the pages */
77 memset(pgs, 0, sizeof(pgs));
78 error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
79 VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
80 PGO_ALLPAGES | PGO_SYNCIO);
82 if (error)
83 goto error;
85 mutex_enter(&uobj->vmobjlock);
86 for (i = 0; i < npages; i++) {
88 KASSERT(pgs[i] != NULL);
89 KASSERT(!(pgs[i]->flags & PG_RELEASED));
92 * Loan break
94 if (pgs[i]->loan_count) {
95 while (pgs[i]->loan_count) {
96 pg = uvm_loanbreak(pgs[i]);
97 if (!pg) {
98 mutex_exit(&uobj->vmobjlock);
99 uvm_wait("uobjwirepg");
100 mutex_enter(&uobj->vmobjlock);
101 continue;
104 pgs[i] = pg;
107 if (pgs[i]->pqflags & PQ_AOBJ) {
108 pgs[i]->flags &= ~(PG_CLEAN);
109 uao_dropswap(uobj, i);
113 /* Wire the pages */
114 mutex_enter(&uvm_pageqlock);
115 for (i = 0; i < npages; i++) {
116 uvm_pagewire(pgs[i]);
118 mutex_exit(&uvm_pageqlock);
120 /* Unbusy the pages */
121 uvm_page_unbusy(pgs, npages);
123 left -= npages;
124 offset += npages << PAGE_SHIFT;
126 mutex_exit(&uobj->vmobjlock);
128 return 0;
130 error:
131 /* Unwire the pages which has been wired */
132 uobj_unwirepages(uobj, start, offset);
134 return error;
138 * uobj_unwirepages: unwire the pages of entire uobj
140 * => NOTE: this function should only be used for types of objects
141 * where PG_RELEASED flag is never set
142 * => caller must pass page-aligned start and end values
145 void
146 uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
148 struct vm_page *pg;
149 off_t offset;
151 mutex_enter(&uobj->vmobjlock);
152 mutex_enter(&uvm_pageqlock);
153 for (offset = start; offset < end; offset += PAGE_SIZE) {
154 pg = uvm_pagelookup(uobj, offset);
156 KASSERT(pg != NULL);
157 KASSERT(!(pg->flags & PG_RELEASED));
159 uvm_pageunwire(pg);
161 mutex_exit(&uvm_pageqlock);
162 mutex_exit(&uobj->vmobjlock);
#if defined(DDB) || defined(DEBUGPRINT)

/*
 * uvm_object_printit: actually prints the object
 *
 * => ddb/debug helper: dumps lock state, pager ops, page count and
 *  reference count; with "full" also lists every resident page,
 *  three <page,offset> pairs per line
 */
void
uvm_object_printit(struct uvm_object *uobj, bool full,
    void (*pr)(const char *, ...))
{
	struct vm_page *pg;
	int cnt = 0;

	(*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
	    uobj, mutex_owned(&uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
	if (UVM_OBJ_IS_KERN_OBJECT(uobj))
		/* kernel objects are never freed; refs are meaningless */
		(*pr)("refs=<SYSTEM>\n");
	else
		(*pr)("refs=%d\n", uobj->uo_refs);

	if (!full) {
		return;
	}
	(*pr)("  PAGES <pg,offset>:\n  ");
	TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
		cnt++;
		(*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
		if ((cnt % 3) == 0) {
			(*pr)("\n  ");
		}
	}
	/* terminate a partially filled last line */
	if ((cnt % 3) != 0) {
		(*pr)("\n");
	}
}

#endif /* DDB || DEBUGPRINT */