/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <kernel/kernel.h>
#include <kernel/vm.h>
#include <kernel/vm_priv.h>
#include <kernel/heap.h>
#include <kernel/debug.h>
#include <kernel/lock.h>
#include <kernel/vm_store_device.h>
#include <newos/errors.h>
struct device_store_data {
	addr_t base_addr;	// physical base address that this store maps
};
static void device_destroy(struct vm_store *store)
{
	VERIFY_VM_STORE(store);
	// the store and its trailing device_store_data were allocated together
	// in vm_store_create_device(), so one kfree releases both
	kfree(store);
}
static off_t device_commit(struct vm_store *store, off_t size)
{
	VERIFY_VM_STORE(store);
	store->committed_size = size;
	return size;
}
static int device_has_page(struct vm_store *store, off_t offset)
{
	VERIFY_VM_STORE(store);
	// this should never be called
	return 0;
}
static ssize_t device_read(struct vm_store *store, off_t offset, iovecs *vecs)
{
	VERIFY_VM_STORE(store);
	panic("device_store: read called. Invalid!\n");
	return ERR_UNIMPLEMENTED;
}
static ssize_t device_write(struct vm_store *store, off_t offset, iovecs *vecs)
{
	VERIFY_VM_STORE(store);
	// no place to write, this will cause the page daemon to skip this store
	return 0;
}
// this fault handler should take over the page fault routine and map the page in
//
// setup: the cache that this store is part of has a ref being held and will be
// released after this handler is done
static int device_fault(struct vm_store *store, struct vm_address_space *aspace, off_t offset)
{
	struct device_store_data *d = (struct device_store_data *)store->data;
	vm_cache_ref *cache_ref = store->cache->ref;
	vm_region *region;
	VERIFY_VM_STORE(store);
	VERIFY_VM_CACHE(store->cache);
	VERIFY_VM_CACHE_REF(store->cache->ref);
	VERIFY_VM_ASPACE(aspace);
//	dprintf("device_fault: offset 0x%x 0x%x + base_addr 0x%x\n", offset, d->base_addr);

	// figure out which page needs to be mapped where
	mutex_lock(&cache_ref->lock);
	(*aspace->translation_map.ops->lock)(&aspace->translation_map);
	// cycle through all of the regions that map this cache and map the page in
	for(region = cache_ref->region_list; region != NULL; region = region->cache_next) {
		VERIFY_VM_REGION(region);

		// make sure this page in the cache that was faulted on is covered in this region
		if(offset >= region->cache_offset && (offset - region->cache_offset) < region->size) {
//			dprintf("device_fault: mapping paddr 0x%x to vaddr 0x%x\n",
//				(addr_t)(d->base_addr + offset),
//				(addr_t)(region->base + (offset - region->cache_offset)));
			(*aspace->translation_map.ops->map)(&aspace->translation_map,
				region->base + (offset - region->cache_offset),
				d->base_addr + offset, region->lock);
		}
	}
	(*aspace->translation_map.ops->unlock)(&aspace->translation_map);
	mutex_unlock(&cache_ref->lock);

//	dprintf("device_fault: done\n");

	return 0;
}
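// Worked example of the mapping arithmetic above (illustrative numbers, not
// from the original source): for a store created with base_addr 0xfee00000
// and a region with base 0x80000000, cache_offset 0 and size 0x10000, a fault
// at cache offset 0x2000 passes the range check (0x2000 - 0 < 0x10000) and
// maps vaddr 0x80000000 + 0x2000 = 0x80002000 to paddr 0xfee00000 + 0x2000 =
// 0xfee02000, with the protection taken from region->lock.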
static vm_store_ops device_ops = {
	&device_destroy,
	&device_commit,
	&device_has_page,
	&device_read,
	&device_write,
	&device_fault,
	NULL,	// acquire_ref: not needed for a device store
	NULL	// release_ref: not needed for a device store
};
vm_store *vm_store_create_device(addr_t base_addr)
{
	vm_store *store;
	struct device_store_data *d;

	// allocate the store and its private data in one block
	store = kmalloc(sizeof(vm_store) + sizeof(struct device_store_data));
	if(store == NULL)
		return NULL;

	store->magic = VM_STORE_MAGIC;
	store->ops = &device_ops;
	store->cache = NULL;
	store->data = (void *)((addr_t)store + sizeof(vm_store));
	store->committed_size = 0;

	d = (struct device_store_data *)store->data;
	d->base_addr = base_addr;

	return store;
}
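
// Illustrative sketch, not part of the original file: roughly how a caller
// might back a physical device range with this store. The surrounding
// cache/region plumbing is assumed (in NewOS it lives in vm.c), so only the
// call into this file is shown; names and addresses below are hypothetical.
#if 0
static int example_create_framebuffer_store(void)
{
	addr_t fb_phys = 0xe0000000;	// hypothetical framebuffer physical base
	vm_store *store;

	// the store only records the physical base; pages are wired lazily by
	// device_fault() when a region mapped through its cache is touched
	store = vm_store_create_device(fb_phys);
	if(store == NULL)
		return ERR_NO_MEMORY;

	// a vm_cache, vm_cache_ref and vm_region would normally be layered on
	// top of this store before any address can fault pages in
	return NO_ERROR;
}
#endif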