/*	$NetBSD: uvm_device.c,v 1.55 2008/12/17 20:51:39 cegger Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_device.c,v 1.1.2.9 1998/02/06 05:11:47 chs Exp
 */

/*
 * uvm_device.c: the device pager.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.55 2008/12/17 20:51:39 cegger Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

/*
 * private global data structure
 *
 * we keep a list of active device objects in the system.
 */

LIST_HEAD(udv_list_struct, uvm_device);
static struct udv_list_struct udv_list;
static kmutex_t udv_lock;
67 * functions
70 static void udv_init(void);
71 static void udv_reference(struct uvm_object *);
72 static void udv_detach(struct uvm_object *);
73 static int udv_fault(struct uvm_faultinfo *, vaddr_t,
74 struct vm_page **, int, int, vm_prot_t,
75 int);

/*
 * master pager structure
 */

const struct uvm_pagerops uvm_deviceops = {
	.pgo_init = udv_init,
	.pgo_reference = udv_reference,
	.pgo_detach = udv_detach,
	.pgo_fault = udv_fault,
};
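
/*
 * note that there is no pgo_get or pgo_put: device mappings are never
 * backed by vm_pages, so every access is resolved through pgo_fault
 * (udv_fault, below), which enters translations directly in the pmap.
 */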

/*
 * the ops!
 */

/*
 * udv_init
 *
 * init pager private data structures.
 */

static void
udv_init(void)
{
	LIST_INIT(&udv_list);
	mutex_init(&udv_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * udv_attach
 *
 * get a VM object that is associated with a device.  allocate a new
 * one if needed.
 *
 * => caller must _not_ already be holding the lock on the uvm_object.
 * => in fact, nothing should be locked so that we can sleep here.
 */
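
/*
 * illustrative sketch (the caller and variable names here are assumed
 * for illustration, not taken from this file): the mmap path attaches
 * a device object by passing a pointer to the dev_t being mapped,
 * roughly:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = udv_attach(&dev, prot, off, size);
 *	if (uobj == NULL)
 *		return (EINVAL);
 */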

struct uvm_object *
udv_attach(void *arg, vm_prot_t accessprot,
    voff_t off,		/* used only for access check */
    vsize_t size	/* used only for access check */)
{
	dev_t device = *((dev_t *)arg);
	struct uvm_device *udv, *lcv;
	const struct cdevsw *cdev;
	dev_type_mmap((*mapfn));

	UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(device=0x%x)", device,0,0,0);

	/*
	 * before we do anything, ensure this device supports mmap
	 */

	cdev = cdevsw_lookup(device);
	if (cdev == NULL) {
		return (NULL);
	}
	mapfn = cdev->d_mmap;
	if (mapfn == NULL || mapfn == nommap || mapfn == nullmmap) {
		return(NULL);
	}

	/*
	 * Negative offsets on the object are not allowed.
	 */

	if ((cdev->d_flag & D_NEGOFFSAFE) == 0 &&
	    off != UVM_UNKNOWN_OFFSET && off < 0)
		return(NULL);

	/*
	 * Check that the specified range of the device allows the
	 * desired protection.
	 *
	 * XXX assumes VM_PROT_* == PROT_*
	 * XXX clobbers off and size, but nothing else here needs them.
	 */

	while (size != 0) {
		if (cdev_mmap(device, off, accessprot) == -1) {
			return (NULL);
		}
		off += PAGE_SIZE; size -= PAGE_SIZE;
	}
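
	/*
	 * note: the loop above probes the driver's d_mmap entry once per
	 * page.  since size is unsigned and is decremented by PAGE_SIZE
	 * each pass, this implicitly assumes a page-aligned size.
	 */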

	/*
	 * keep looping until we get it
	 */

	for (;;) {

		/*
		 * first, attempt to find it on the main list
		 */

		mutex_enter(&udv_lock);
		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * got it on main list.  put a hold on it and unlock udv_lock.
		 */

		if (lcv) {

			/*
			 * if someone else has a hold on it, sleep and start
			 * over again.
			 */

			if (lcv->u_flags & UVM_DEVICE_HOLD) {
				lcv->u_flags |= UVM_DEVICE_WANTED;
				UVM_UNLOCK_AND_WAIT(lcv, &udv_lock, false,
				    "udv_attach",0);
				continue;
			}

			/* we are now holding it */
			lcv->u_flags |= UVM_DEVICE_HOLD;
			mutex_exit(&udv_lock);

			/*
			 * bump reference count, unhold, return.
			 */

			mutex_enter(&lcv->u_obj.vmobjlock);
			lcv->u_obj.uo_refs++;
			mutex_exit(&lcv->u_obj.vmobjlock);

			mutex_enter(&udv_lock);
			if (lcv->u_flags & UVM_DEVICE_WANTED)
				wakeup(lcv);
			lcv->u_flags &= ~(UVM_DEVICE_WANTED|UVM_DEVICE_HOLD);
			mutex_exit(&udv_lock);
			return(&lcv->u_obj);
		}
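
		/*
		 * note: UVM_DEVICE_HOLD pins the udv while udv_lock is
		 * dropped to take vmobjlock above, so a concurrent
		 * udv_detach() cannot free it out from under us;
		 * UVM_DEVICE_WANTED records that a sleeper needs a
		 * wakeup() when the hold is released.
		 */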

		/*
		 * did not find it on main list.  need to malloc a new one.
		 */

		mutex_exit(&udv_lock);
		/* NOTE: we could sleep in the following malloc() */
		udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
		mutex_enter(&udv_lock);

		/*
		 * now we have to double check to make sure no one added it
		 * to the list while we were sleeping...
		 */

		LIST_FOREACH(lcv, &udv_list, u_list) {
			if (device == lcv->u_device)
				break;
		}

		/*
		 * did we lose a race to someone else?
		 * free our memory and retry.
		 */

		if (lcv) {
			mutex_exit(&udv_lock);
			free(udv, M_TEMP);
			continue;
		}

		/*
		 * we have it!  init the data structures, add to list
		 * and return.
		 */

		UVM_OBJ_INIT(&udv->u_obj, &uvm_deviceops, 1);
		udv->u_flags = 0;
		udv->u_device = device;
		LIST_INSERT_HEAD(&udv_list, udv, u_list);
		mutex_exit(&udv_lock);
		return(&udv->u_obj);
	}
	/*NOTREACHED*/
}

/*
 * udv_reference
 *
 * add a reference to a VM object.  Note that the reference count must
 * already be one (the passed in reference) so there is no chance of the
 * udv being released or locked out here.
 *
 * => caller must call with object unlocked.
 */

static void
udv_reference(struct uvm_object *uobj)
{
	UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist);

	mutex_enter(&uobj->vmobjlock);
	uobj->uo_refs++;
	UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)",
	    uobj, uobj->uo_refs,0,0);
	mutex_exit(&uobj->vmobjlock);
}

/*
 * udv_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
udv_detach(struct uvm_object *uobj)
{
	struct uvm_device *udv = (struct uvm_device *)uobj;
	UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);

	/*
	 * loop until done
	 */
again:
	mutex_enter(&uobj->vmobjlock);
	if (uobj->uo_refs > 1) {
		uobj->uo_refs--;
		mutex_exit(&uobj->vmobjlock);
		UVMHIST_LOG(maphist," <- done, uobj=0x%x, ref=%d",
		    uobj,uobj->uo_refs,0,0);
		return;
	}

	/*
	 * is it being held?  if so, wait until others are done.
	 */

	mutex_enter(&udv_lock);
	if (udv->u_flags & UVM_DEVICE_HOLD) {
		udv->u_flags |= UVM_DEVICE_WANTED;
		mutex_exit(&uobj->vmobjlock);
		UVM_UNLOCK_AND_WAIT(udv, &udv_lock, false, "udv_detach",0);
		goto again;
	}

	/*
	 * got it!  nuke it now.
	 */

	LIST_REMOVE(udv, u_list);
	if (udv->u_flags & UVM_DEVICE_WANTED)
		wakeup(udv);
	mutex_exit(&udv_lock);
	mutex_exit(&uobj->vmobjlock);
	UVM_OBJ_DESTROY(uobj);
	free(udv, M_TEMP);
	UVMHIST_LOG(maphist," <- done, freed uobj=0x%x", uobj,0,0,0);
}
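
/*
 * note: dropping the final reference removes the udv from udv_list and
 * frees it, so a later mmap of the same dev_t goes back through
 * udv_attach() and allocates a fresh object.
 */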

/*
 * udv_fault: non-standard fault routine for device "pages"
 *
 * => rather than having a "get" function, we have a fault routine
 *	since we don't return vm_pages we need full control over the
 *	pmap_enter map in
 * => all the usual fault data structures are locked by the caller
 *	(i.e. maps(read), amap (if any), uobj)
 * => on return, we unlock all fault data structures
 * => flags: PGO_ALLPAGES: get all of the pages
 *	     PGO_LOCKED: fault data structures are locked
 *    XXX: currently PGO_LOCKED is always required ... consider removing
 *	it as a flag
 * => NOTE: vaddr is the VA of pps[0] in ufi->entry, _NOT_ pps[centeridx]
 */

static int
udv_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, struct vm_page **pps,
    int npages, int centeridx, vm_prot_t access_type,
    int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct uvm_device *udv = (struct uvm_device *)uobj;
	vaddr_t curr_va;
	off_t curr_offset;
	paddr_t paddr, mdpgno;
	int lcv, retval;
	dev_t device;
	vm_prot_t mapprot;
	UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"  flags=%d", flags,0,0,0);

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		UVMHIST_LOG(maphist, "<- failed -- COW entry (etype=0x%x)",
		    entry->etype, 0,0,0);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return(EIO);
	}

	/*
	 * get device map function.
	 */

	device = udv->u_device;
	if (cdevsw_lookup(device) == NULL) {
		/* XXX This should not happen */
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return (EIO);
	}

	/*
	 * now we must determine the offset in udv to use and the VA to
	 * use for pmap_enter.  note that we always use orig_map's pmap
	 * for pmap_enter (even if we have a submap).  since virtual
	 * addresses in a submap must match the main map, this is ok.
	 */

	/* udv offset = (offset from start of entry) + entry's offset */
	curr_offset = entry->offset + (vaddr - entry->start);
	/* pmap va = vaddr (virtual address of pps[0]) */
	curr_va = vaddr;
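
	/*
	 * worked example (illustrative values only): with
	 * entry->start == 0x20003000, entry->offset == 0x1000 and a
	 * fault at vaddr == 0x20005000, we get
	 * curr_offset = 0x1000 + (0x20005000 - 0x20003000) = 0x3000,
	 * i.e. the fourth page of the device mapping (for 4K pages).
	 */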

	/*
	 * loop over the page range entering in as needed
	 */

	retval = 0;
	for (lcv = 0 ; lcv < npages ; lcv++, curr_offset += PAGE_SIZE,
	    curr_va += PAGE_SIZE) {
		if ((flags & PGO_ALLPAGES) == 0 && lcv != centeridx)
			continue;

		if (pps[lcv] == PGO_DONTCARE)
			continue;

		mdpgno = cdev_mmap(device, curr_offset, access_type);
		if (mdpgno == -1) {
			retval = EIO;
			break;
		}
		paddr = pmap_phys_address(mdpgno);
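		/*
		 * note: the value returned by the driver's mmap entry is
		 * a machine-dependent cookie rather than a raw physical
		 * address; pmap_phys_address() converts it into the
		 * paddr_t that pmap_enter() expects.
		 */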
		mapprot = ufi->entry->protection;
		UVMHIST_LOG(maphist,
		    "  MAPPING: device: pm=0x%x, va=0x%x, pa=0x%lx, at=%d",
		    ufi->orig_map->pmap, curr_va, paddr, mapprot);
		if (pmap_enter(ufi->orig_map->pmap, curr_va, paddr,
		    mapprot, PMAP_CANFAIL | mapprot) != 0) {

			/*
			 * pmap_enter() didn't have the resource to
			 * enter this mapping.  Unlock everything,
			 * wait for the pagedaemon to free up some
			 * pages, and then tell uvm_fault() to start
			 * the fault again.
			 *
			 * XXX Needs some rethinking for the PGO_ALLPAGES
			 * XXX case.
			 */

			pmap_update(ufi->orig_map->pmap);	/* sync what we have so far */
			uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
			    uobj, NULL);
			uvm_wait("udv_fault");
			return (ERESTART);
		}
	}

	pmap_update(ufi->orig_map->pmap);
	uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
	return (retval);
}