fix a warning fredf pointed out on irc
[newos.git] / kernel / vm / vm_store_device.c
blob ecd52ef9a1f376021b15a46041ac521d41f7781c
/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/vm.h>
#include <kernel/vm_priv.h>
#include <kernel/heap.h>
#include <kernel/debug.h>
#include <kernel/lock.h>
#include <kernel/vm_store_device.h>
#include <newos/errors.h>
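
// A device store backs a vm_cache with a contiguous range of physical
// address space (e.g. memory-mapped device registers or a frame buffer).
// There is nothing to page in or out; device_fault() simply maps
// base_addr + offset into the faulting address space.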

struct device_store_data {
	addr_t base_addr;	// physical base address this store maps
};

static void device_destroy(struct vm_store *store)
{
	if(store) {
		VERIFY_VM_STORE(store);
		// store->data lives in the same allocation as the store itself
		// (see vm_store_create_device), so one kfree releases both
		kfree(store);
	}
}

static off_t device_commit(struct vm_store *store, off_t size)
{
	VERIFY_VM_STORE(store);
	// nothing to reserve; just remember the committed size
	store->committed_size = size;
	return size;
}

static int device_has_page(struct vm_store *store, off_t offset)
{
	VERIFY_VM_STORE(store);
	// this should never be called
	return 0;
}

static ssize_t device_read(struct vm_store *store, off_t offset, iovecs *vecs)
{
	VERIFY_VM_STORE(store);
	panic("device_store: read called. Invalid!\n");
	return ERR_UNIMPLEMENTED;
}

static ssize_t device_write(struct vm_store *store, off_t offset, iovecs *vecs)
{
	VERIFY_VM_STORE(store);
	// no place to write, this will cause the page daemon to skip this store
	return 0;
}

// This fault handler takes over the page fault routine and maps the page in.
// Setup: the cache this store is part of has a ref held on it, which will be
// released after this handler is done.
static int device_fault(struct vm_store *store, struct vm_address_space *aspace, off_t offset)
{
	struct device_store_data *d = (struct device_store_data *)store->data;
	vm_cache_ref *cache_ref = store->cache->ref;
	vm_region *region;

	VERIFY_VM_STORE(store);
	VERIFY_VM_CACHE(store->cache);
	VERIFY_VM_CACHE_REF(store->cache->ref);
	VERIFY_VM_ASPACE(aspace);

//	dprintf("device_fault: offset 0x%x + base_addr 0x%x\n", offset, d->base_addr);

	// figure out which page needs to be mapped where
	mutex_lock(&cache_ref->lock);
	(*aspace->translation_map.ops->lock)(&aspace->translation_map);

	// cycle through all of the regions that map this cache and map the page in
	for(region = cache_ref->region_list; region != NULL; region = region->cache_next) {
		VERIFY_VM_REGION(region);

		// make sure the page in the cache that was faulted on is covered by this region
		if(offset >= region->cache_offset && (offset - region->cache_offset) < region->size) {
//			dprintf("device_fault: mapping paddr 0x%x to vaddr 0x%x\n",
//				(addr_t)(d->base_addr + offset),
//				(addr_t)(region->base + (offset - region->cache_offset)));
			(*aspace->translation_map.ops->map)(&aspace->translation_map,
				region->base + (offset - region->cache_offset),
				d->base_addr + offset, region->lock);
		}
	}
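
	// every region covering the faulted offset now has the page mapped;
	// release the locks taken above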
	(*aspace->translation_map.ops->unlock)(&aspace->translation_map);
	mutex_unlock(&cache_ref->lock);

//	dprintf("device_fault: done\n");

	return 0;
}
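
// The store's operation table; slot order follows vm_store_ops: destroy,
// commit, has_page, read, write, fault, then two optional hooks this store
// leaves NULL.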
static vm_store_ops device_ops = {
	&device_destroy,
	&device_commit,
	&device_has_page,
	&device_read,
	&device_write,
	&device_fault,
	NULL,
	NULL
};

vm_store *vm_store_create_device(addr_t base_addr)
{
	vm_store *store;
	struct device_store_data *d;

	// allocate the store and its private data in one block
	store = kmalloc(sizeof(vm_store) + sizeof(struct device_store_data));
	if(store == NULL)
		return NULL;

	store->magic = VM_STORE_MAGIC;
	store->ops = &device_ops;
	store->cache = NULL;
	store->data = (void *)((addr_t)store + sizeof(vm_store));
	store->committed_size = 0;

	d = (struct device_store_data *)store->data;
	d->base_addr = base_addr;

	return store;
}
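
/*
** Usage sketch: a device store is meant to be wrapped in a cache and then
** mapped by a region, roughly the way vm_map_physical_memory() in vm.c
** consumes it. The helper names vm_cache_create() and vm_cache_ref_create()
** below are assumptions based on the surrounding VM code, not verified
** signatures:
**
**	vm_store *store = vm_store_create_device(phys_addr);
**	vm_cache *cache = vm_cache_create(store);        // assumed helper
**	vm_cache_ref *ref = vm_cache_ref_create(cache);  // assumed helper
**	// regions created against this cache are then populated lazily,
**	// one page at a time, by device_fault()
*/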