/* minix/servers/vm/mem_file.c */
/* This file implements the methods of memory-mapped files. */

#include <assert.h>

#include "proto.h"
#include "vm.h"
#include "region.h"
#include "glo.h"
#include "cache.h"

/* These functions are static so as to not pollute the
 * global namespace, and are accessed through their function
 * pointers.
 */

static void mappedfile_split(struct vmproc *vmp, struct vir_region *vr,
	struct vir_region *r1, struct vir_region *r2);
static int mappedfile_unreference(struct phys_region *pr);
static int mappedfile_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t callback, void *state,
	int len, int *io);
static int mappedfile_sanitycheck(struct phys_region *pr, const char *file, int line);
static int mappedfile_writable(struct phys_region *pr);
static int mappedfile_copy(struct vir_region *vr, struct vir_region *newvr);
static int mappedfile_lowshrink(struct vir_region *vr, vir_bytes len);
static void mappedfile_delete(struct vir_region *region);
static int mappedfile_pt_flags(struct vir_region *vr);

struct mem_type mem_type_mappedfile = {
	.name = "file-mapped memory",
	.ev_unreference = mappedfile_unreference,
	.ev_pagefault = mappedfile_pagefault,
	.ev_sanitycheck = mappedfile_sanitycheck,
	.ev_copy = mappedfile_copy,
	.writable = mappedfile_writable,
	.ev_split = mappedfile_split,
	.ev_lowshrink = mappedfile_lowshrink,
	.ev_delete = mappedfile_delete,
	.pt_flags = mappedfile_pt_flags,
};
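
/* Return the architecture-specific page table flags for file-mapped pages.
 * On ARM such pages may be mapped cacheable; other architectures need no
 * extra flags.
 */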
static int mappedfile_pt_flags(struct vir_region *vr){
#if defined(__arm__)
	return ARM_VM_PTE_CACHED;
#else
	return 0;
#endif
}
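
/* Release the physical page behind a no-longer-referenced phys_region. */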
static int mappedfile_unreference(struct phys_region *pr)
{
	assert(pr->ph->refcount == 0);
	if(pr->ph->phys != MAP_NONE)
		free_mem(ABS2CLICK(pr->ph->phys), 1);
	return OK;
}
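
/* Turn a file-mapped page into a private, anonymous copy (copy-on-write).
 * If 'clearend' is nonzero, the last 'clearend' bytes of the copied page
 * are zeroed; this is used for the partial page at the end of a mapping.
 */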
static int cow_block(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, u16_t clearend)
{
	int r;

	if((r=mem_cow(region, ph, MAP_NONE, MAP_NONE)) != OK) {
		printf("mappedfile_pagefault: COW failed\n");
		return r;
	}

	/* After COW we are a normal piece of anonymous memory. */
	ph->memtype = &mem_type_anon;

	if(clearend) {
		phys_bytes phaddr = ph->ph->phys, po = VM_PAGE_SIZE-clearend;
		assert(clearend < VM_PAGE_SIZE);
		phaddr += po;
		if(sys_memset(NONE, 0, phaddr, clearend) != OK) {
			panic("cow_block: clearend failed\n");
		}
	}

	return OK;
}
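
/* Page fault handler for file-mapped regions. A missing page is first
 * looked up in the VM block cache; on a miss, VFS is asked to read it in
 * and the fault is suspended until the I/O completes. Write faults on
 * pages shared with the cache are resolved with cow_block().
 */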
static int mappedfile_pagefault(struct vmproc *vmp, struct vir_region *region,
	struct phys_region *ph, int write, vfs_callback_t cb,
	void *state, int statelen, int *io)
{
	u32_t allocflags;
	int procfd = region->param.file.fdref->fd;

	allocflags = vrallocflags(region->flags);

	assert(ph->ph->refcount > 0);
	assert(region->param.file.inited);
	assert(region->param.file.fdref);
	assert(region->param.file.fdref->dev != NO_DEV);

	/* Totally new block? Create it. */
	if(ph->ph->phys == MAP_NONE) {
		struct cached_page *cp;
		u64_t referenced_offset =
			region->param.file.offset + ph->offset;

		if(region->param.file.fdref->ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(region->param.file.fdref->dev,
				referenced_offset, VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(region->param.file.fdref->dev,
				region->param.file.fdref->ino, referenced_offset, 1);
		}

		/*
		 * Normally, a cache hit saves a round-trip to the file system
		 * to load the page. However, if the page in the VM cache is
		 * marked for one-time use, then force a round-trip through the
		 * file system anyway, so that the FS can update the page by
		 * readding it to the cache. Thus, for one-time use pages,
		 * no caching is performed. This approach is correct even in
		 * the light of concurrent requests and disappearing processes,
		 * but relies on VM requests to VFS being fully serialized.
		 */
		if(cp && (!cb || !(cp->flags & VMSF_ONCE))) {
			int result = OK;
			pb_unreferenced(region, ph, 0);
			pb_link(ph, cp->page, ph->offset, region);

			if(roundup(ph->offset+region->param.file.clearend,
				VM_PAGE_SIZE) >= region->length) {
				result = cow_block(vmp, region, ph,
					region->param.file.clearend);
			} else if(result == OK && write) {
				result = cow_block(vmp, region, ph, 0);
			}

			/* Discard one-use pages after mapping them in. */
			if (result == OK && (cp->flags & VMSF_ONCE))
				rmcache(cp);

			return result;
		}

		if(!cb) {
#if 0
			printf("VM: mem_file: no callback, returning EFAULT\n");
			sys_diagctl_stacktrace(vmp->vm_endpoint);
#endif
			return EFAULT;
		}
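
		/* No usable cached copy: ask VFS to read the page in from the
		 * file. The fault is resolved asynchronously, so suspend it
		 * for now.
		 */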
		if(vfs_request(VMVFSREQ_FDIO, procfd, vmp, referenced_offset,
			VM_PAGE_SIZE, cb, NULL, state, statelen) != OK) {
			printf("VM: mappedfile_pagefault: vfs_request failed\n");
			return ENOMEM;
		}
		*io = 1;
		return SUSPEND;
	}
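
	/* The page is already present. A read fault needs no further work;
	 * a write fault gets a private copy of the page via cow_block().
	 */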
	if(!write) {
#if 0
		printf("mappedfile_pagefault: nonwrite fault?\n");
#endif
		return OK;
	}

	return cow_block(vmp, region, ph, 0);
}
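
/* Sanity check hook: account this region's page in the used-pages map. */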
static int mappedfile_sanitycheck(struct phys_region *pr, const char *file, int line)
{
	MYASSERT(usedpages_add(pr->ph->phys, VM_PAGE_SIZE) == OK);
	return OK;
}
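
/* File-mapped pages are never mapped in writable directly; every write
 * faults and is resolved with copy-on-write in mappedfile_pagefault().
 */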
static int mappedfile_writable(struct phys_region *pr)
{
	/* We are never writable. */
	return 0;
}
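
/* Copy hook: make 'newvr' refer to the same file, offset and clearend as
 * 'vr'. The new region starts without any prefilled pages.
 */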
int mappedfile_copy(struct vir_region *vr, struct vir_region *newvr)
{
	assert(vr->param.file.inited);
	mappedfile_setfile(newvr->parent, newvr, vr->param.file.fdref->fd,
		vr->param.file.offset,
		vr->param.file.fdref->dev, vr->param.file.fdref->ino,
		vr->param.file.clearend, 0, 0);
	assert(newvr->param.file.inited);

	return OK;
}
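
/* Attach a file to a region: take a reference on the (deduplicated) file
 * descriptor and record the file offset and clearend. If 'prefill' is set,
 * also map in any pages that are already present in the VM cache.
 */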
int mappedfile_setfile(struct vmproc *owner,
	struct vir_region *region, int fd, u64_t offset,
	dev_t dev, ino_t ino, u16_t clearend, int prefill, int mayclosefd)
{
	vir_bytes vaddr;
	struct fdref *newref;

	newref = fdref_dedup_or_new(owner, ino, dev, fd, mayclosefd);

	assert(newref);
	assert(!region->param.file.inited);
	assert(dev != NO_DEV);
	fdref_ref(newref, region);
	region->param.file.offset = offset;
	region->param.file.clearend = clearend;
	region->param.file.inited = 1;

	if(!prefill) return OK;
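
	/* Prefill: map in whatever pages the VM cache already holds for this
	 * file range, so that they do not have to be faulted in later.
	 */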
	for(vaddr = 0; vaddr < region->length; vaddr+=VM_PAGE_SIZE) {
		struct cached_page *cp = NULL;
		struct phys_region *pr;
		u64_t referenced_offset = offset + vaddr;

		if(roundup(vaddr+region->param.file.clearend,
			VM_PAGE_SIZE) >= region->length) {
			break;
		}

		if(ino == VMC_NO_INODE) {
			cp = find_cached_page_bydev(dev, referenced_offset,
				VMC_NO_INODE, 0, 1);
		} else {
			cp = find_cached_page_byino(dev, ino,
				referenced_offset, 1);
		}

		/*
		 * If we get a hit for a page that is to be used only once,
		 * then either we found a stale page (due to a process dying
		 * before a requested once-page could be mapped in) or this is
		 * a rare case of concurrent requests for the same page. In
		 * both cases, force the page to be obtained from its FS later.
		 */
		if(!cp || (cp->flags & VMSF_ONCE)) continue;
		if(!(pr = pb_reference(cp->page, vaddr, region,
			&mem_type_mappedfile))) {
			printf("mappedfile_setfile: pb_reference failed\n");
			break;
		}
		if(map_ph_writept(region->parent, region, pr) != OK) {
			printf("mappedfile_setfile: map_ph_writept failed\n");
			break;
		}
	}

	return OK;
}
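
/* Split a file-mapped region 'vr' into 'r1' and 'r2'. Both halves keep a
 * reference to the same file; only the second half inherits the clearend,
 * and its file offset is shifted by the length of the first half.
 */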
static void mappedfile_split(struct vmproc *vmp, struct vir_region *vr,
	struct vir_region *r1, struct vir_region *r2)
{
	assert(!r1->param.file.inited);
	assert(!r2->param.file.inited);
	assert(vr->param.file.inited);
	assert(r1->length + r2->length == vr->length);
	assert(vr->def_memtype == &mem_type_mappedfile);
	assert(r1->def_memtype == &mem_type_mappedfile);
	assert(r2->def_memtype == &mem_type_mappedfile);

	r1->param.file = vr->param.file;
	r2->param.file = vr->param.file;

	fdref_ref(vr->param.file.fdref, r1);
	fdref_ref(vr->param.file.fdref, r2);

	r1->param.file.clearend = 0;
	r2->param.file.offset += r1->length;

	assert(r1->param.file.inited);
	assert(r2->param.file.inited);
}
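
/* Shrink the region from its low end by 'len' bytes: the file offset of
 * the mapping simply moves forward by the same amount.
 */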
static int mappedfile_lowshrink(struct vir_region *vr, vir_bytes len)
{
	assert(vr->param.file.inited);
	vr->param.file.offset += len;
	return OK;
}
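
/* Delete hook: drop this region's reference on the underlying file. */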
static void mappedfile_delete(struct vir_region *region)
{
	assert(region->def_memtype == &mem_type_mappedfile);
	assert(region->param.file.inited);
	assert(region->param.file.fdref);
	fdref_deref(region);
	region->param.file.inited = 0;
}