minix/servers/vm/mem_cache.c
/* This file implements the disk cache.
 *
 * If they exist anywhere, cached pages are always in a private
 * VM datastructure.
 *
 * They might also be any combination of:
 *    - mapped in by a filesystem for reading/writing by it
 *    - mapped in by a process as the result of an mmap call (future)
 *
 * This file manages the datastructure of all cache blocks, and
 * mapping them in and out of filesystems.
 */
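
/* A minimal sketch (not part of this file) of the file-system side of the
 * mapping request handled by do_mapcache() below. The m_vmmcp and
 * m_vmmcp_reply message fields match the ones this file reads and writes;
 * the request type VM_MAPCACHEPAGE and the _taskcall()/VM_PROC_NR transport
 * are assumptions about the caller side, not something this file defines.
 */
#if 0
static void *
map_cached_blocks(dev_t dev, uint64_t dev_off, ino_t ino, off_t ino_off,
        int pages)
{
        message m;

        memset(&m, 0, sizeof(m));
        m.m_vmmcp.dev = dev;
        m.m_vmmcp.dev_offset = dev_off;
        m.m_vmmcp.ino = ino;
        m.m_vmmcp.ino_offset = ino_off;
        m.m_vmmcp.pages = pages;

        /* VM replies with the virtual address the blocks were mapped at,
         * or an error (e.g. ENOENT if a block is not in the cache).
         */
        if(_taskcall(VM_PROC_NR, VM_MAPCACHEPAGE, &m) != OK)
                return NULL;

        return m.m_vmmcp_reply.addr;
}
#endif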
#include <assert.h>
#include <string.h>

#include <minix/hash.h>

#include <machine/vmparam.h>

#include "proto.h"
#include "vm.h"
#include "region.h"
#include "glo.h"
#include "cache.h"
static int cache_reference(struct phys_region *pr, struct phys_region *pr2);
static int cache_unreference(struct phys_region *pr);
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line);
static int cache_writable(struct phys_region *pr);
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l);
static int cache_lowshrink(struct vir_region *vr, vir_bytes len);
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
        struct phys_region *ph, int write, vfs_callback_t cb, void *state,
        int len, int *io);
static int cache_pt_flags(struct vir_region *vr);
struct mem_type mem_type_cache = {
        .name = "cache memory",
        .ev_reference = cache_reference,
        .ev_unreference = cache_unreference,
        .ev_resize = cache_resize,
        .ev_lowshrink = cache_lowshrink,
        .ev_sanitycheck = cache_sanitycheck,
        .ev_pagefault = cache_pagefault,
        .writable = cache_writable,
        .pt_flags = cache_pt_flags,
};
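
/* This table plugs the cache memory type into VM's generic region code:
 * regions created with &mem_type_cache have their reference, pagefault,
 * resize and sanity-check events dispatched to the static handlers in this
 * file.
 */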
static int cache_pt_flags(struct vir_region *vr)
{
#if defined(__arm__)
        return ARM_VM_PTE_CACHED;
#else
        return 0;
#endif
}
static int cache_reference(struct phys_region *pr, struct phys_region *pr2)
{
        return OK;
}
static int cache_unreference(struct phys_region *pr)
{
        return mem_type_anon.ev_unreference(pr);
}
static int cache_sanitycheck(struct phys_region *pr, const char *file, int line)
{
        MYASSERT(usedpages_add(pr->ph->phys, VM_PAGE_SIZE) == OK);
        return OK;
}
static int cache_writable(struct phys_region *pr)
{
        /* Cache blocks are currently used only by filesystems, so they are
         * always writable.
         */
        assert(pr->ph->refcount > 0);
        return pr->ph->phys != MAP_NONE;
}
static int cache_resize(struct vmproc *vmp, struct vir_region *vr, vir_bytes l)
{
        printf("VM: cannot resize cache blocks.\n");
        return ENOMEM;
}
static int cache_lowshrink(struct vir_region *vr, vir_bytes len)
{
        return OK;
}
int
do_mapcache(message *msg)
{
        dev_t dev = msg->m_vmmcp.dev;
        uint64_t dev_off = msg->m_vmmcp.dev_offset;
        off_t ino_off = msg->m_vmmcp.ino_offset;
        int n;
        phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;
        phys_bytes alloc_bytes;
        struct vir_region *vr;
        struct vmproc *caller;
        vir_bytes offset;
        int io = 0;

        if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
                printf("VM: unaligned cache operation\n");
                return EFAULT;
        }

        if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
        caller = &vmproc[n];

        if(bytes < VM_PAGE_SIZE) return EINVAL;

        alloc_bytes = bytes;
#ifdef _MINIX_MAGIC
        /* Make sure there is a 1-page hole available before the region,
         * in case instrumentation needs to allocate in-band metadata later.
         * This does effectively halve the usable part of the caller's address
         * space, though, so only do this if we are instrumenting at all.
         * Also make sure it falls within the mmap range, so that it is
         * transferred upon live update. This again cuts the usable part of
         * the address space for caching purposes in half.
         */
        alloc_bytes += VM_PAGE_SIZE;
#endif
        if (!(vr = map_page_region(caller, VM_MMAPBASE, VM_MMAPTOP,
            alloc_bytes, VR_ANON | VR_WRITABLE, 0, &mem_type_cache))) {
                printf("VM: map_page_region failed\n");
                return ENOMEM;
        }
#ifdef _MINIX_MAGIC
        /* Return the extra page, leaving the hole before the region. */
        map_unmap_region(caller, vr, 0, VM_PAGE_SIZE);
#endif

        assert(vr->length == bytes);

        for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
                struct cached_page *hb;

                assert(vr->length == bytes);
                assert(offset < vr->length);

                if(!(hb = find_cached_page_bydev(dev, dev_off + offset,
                    msg->m_vmmcp.ino, ino_off + offset, 1)) ||
                    (hb->flags & VMSF_ONCE)) {
                        map_unmap_region(caller, vr, 0, bytes);
                        return ENOENT;
                }

                assert(!vr->param.pb_cache);
                vr->param.pb_cache = hb->page;

                assert(vr->length == bytes);
                assert(offset < vr->length);

                if(map_pf(caller, vr, offset, 1, NULL, NULL, 0, &io) != OK) {
                        map_unmap_region(caller, vr, 0, bytes);
                        printf("VM: map_pf failed\n");
                        return ENOMEM;
                }
                assert(!vr->param.pb_cache);
        }

        memset(msg, 0, sizeof(*msg));

        msg->m_vmmcp_reply.addr = (void *) vr->vaddr;

        assert(vr);

#if CACHE_SANITY
        cache_sanitycheck_internal();
#endif

        return OK;
}
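
/* Note on the handoff above: do_mapcache() passes each cached physical block
 * to the fault handler through vr->param.pb_cache and then forces a fault
 * with map_pf(). cache_pagefault() below consumes the block, links it into
 * the faulting phys_region, and clears param.pb_cache again; the asserts on
 * both sides check exactly this protocol.
 */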
static int cache_pagefault(struct vmproc *vmp, struct vir_region *region,
        struct phys_region *ph, int write, vfs_callback_t cb,
        void *state, int len, int *io)
{
        vir_bytes offset = ph->offset;
        assert(ph->ph->phys == MAP_NONE);
        assert(region->param.pb_cache);

        pb_unreferenced(region, ph, 0);
        pb_link(ph, region->param.pb_cache, offset, region);
        region->param.pb_cache = NULL;

        return OK;
}
int
do_setcache(message *msg)
{
        int r;
        dev_t dev = msg->m_vmmcp.dev;
        uint64_t dev_off = msg->m_vmmcp.dev_offset;
        off_t ino_off = msg->m_vmmcp.ino_offset;
        int flags = msg->m_vmmcp.flags;
        int n;
        struct vmproc *caller;
        phys_bytes offset;
        phys_bytes bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;

        if(bytes < VM_PAGE_SIZE) return EINVAL;

        if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
                printf("VM: unaligned cache operation\n");
                return EFAULT;
        }

        if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
        caller = &vmproc[n];

        for(offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
                struct vir_region *region;
                struct phys_region *phys_region = NULL;
                vir_bytes v = (vir_bytes) msg->m_vmmcp.block + offset;
                struct cached_page *hb;

                if(!(region = map_lookup(caller, v, &phys_region))) {
                        printf("VM: error: no reasonable memory region given (offset 0x%lx, 0x%lx)\n", offset, v);
                        return EFAULT;
                }

                if(!phys_region) {
                        printf("VM: error: no available memory region given\n");
                        return EFAULT;
                }

                if((hb=find_cached_page_bydev(dev, dev_off + offset,
                    msg->m_vmmcp.ino, ino_off + offset, 1))) {
                        /* A cache entry for this block already exists; see
                         * whether it still refers to the caller's page.
                         */
                        if(hb->page != phys_region->ph ||
                            (hb->flags & VMSF_ONCE)) {
                                /* The previous cache entry has become
                                 * obsolete; make a new one. rmcache
                                 * removes it from the cache and frees
                                 * the page if it isn't mapped in anywhere
                                 * else.
                                 */
                                rmcache(hb);
                        } else {
                                /* The block was already there; the inode info
                                 * may have changed, which is fine.
                                 */
                                continue;
                        }
                }

                if(phys_region->memtype != &mem_type_anon &&
                    phys_region->memtype != &mem_type_anon_contig) {
                        printf("VM: error: no reasonable memory type\n");
                        return EFAULT;
                }

                if(phys_region->ph->refcount != 1) {
                        printf("VM: error: no reasonable refcount\n");
                        return EFAULT;
                }

                phys_region->memtype = &mem_type_cache;

                if((r=addcache(dev, dev_off + offset, msg->m_vmmcp.ino,
                    ino_off + offset, flags, phys_region->ph)) != OK) {
                        printf("VM: addcache failed\n");
                        return r;
                }
        }

#if CACHE_SANITY
        cache_sanitycheck_internal();
#endif

        return OK;
}
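
/* A minimal sketch (not part of this file) of the file-system side of
 * do_setcache(): after reading a block into an anonymous mapping at 'block',
 * the caller offers those pages to the cache. As above, the request type
 * VM_SETCACHEPAGE and the _taskcall()/VM_PROC_NR transport are assumptions
 * about the caller side.
 */
#if 0
static int
set_cached_blocks(void *block, dev_t dev, uint64_t dev_off, ino_t ino,
        off_t ino_off, int pages, int flags)
{
        message m;

        memset(&m, 0, sizeof(m));
        m.m_vmmcp.block = block;
        m.m_vmmcp.dev = dev;
        m.m_vmmcp.dev_offset = dev_off;
        m.m_vmmcp.ino = ino;
        m.m_vmmcp.ino_offset = ino_off;
        m.m_vmmcp.pages = pages;
        m.m_vmmcp.flags = flags;        /* e.g. VMSF_ONCE for one-shot use */

        return _taskcall(VM_PROC_NR, VM_SETCACHEPAGE, &m);
}
#endif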
/*
 * Forget all pages associated with a particular block range in the cache.
 */
int
do_forgetcache(message *msg)
{
        struct cached_page *hb;
        dev_t dev;
        uint64_t dev_off;
        phys_bytes bytes, offset;

        dev = msg->m_vmmcp.dev;
        dev_off = msg->m_vmmcp.dev_offset;
        bytes = msg->m_vmmcp.pages * VM_PAGE_SIZE;

        if (bytes < VM_PAGE_SIZE)
                return EINVAL;

        if (dev_off % PAGE_SIZE) {
                printf("VM: unaligned cache operation\n");
                return EFAULT;
        }

        for (offset = 0; offset < bytes; offset += VM_PAGE_SIZE) {
                if ((hb = find_cached_page_bydev(dev, dev_off + offset,
                    VMC_NO_INODE, 0 /*ino_off*/, 0 /*touchlru*/)) != NULL)
                        rmcache(hb);
        }

        return OK;
}
/*
 * A file system wants to invalidate all pages belonging to a certain device.
 */
int
do_clearcache(message *msg)
{
        dev_t dev;

        dev = msg->m_vmmcp.dev;

        clear_cache_bydev(dev);

        return OK;
}