#include <minix/callnr.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/bitmap.h>
#include <minix/vfsif.h>

#include <machine/vmparam.h>

#include <sys/param.h>

#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

#include "glo.h"
#include "proto.h"
#include "util.h"
#include "region.h"
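
/*
 * Page fault and memory request handling for the VM server.  The kernel
 * reports page faults to be resolved here; memory requests ask VM to make
 * a range of a process' address space present and, if needed, writable.
 */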

/* State of one page fault that may be handled asynchronously. */
struct pf_state {
	endpoint_t ep;		/* faulting process */
	vir_bytes vaddr;	/* faulting address */
	u32_t err;		/* page fault error code */
};

/* State of one memory request, possibly spanning many pages. */
struct hm_state {
	endpoint_t caller;	/* KERNEL or process? if NONE, no callback */
	endpoint_t requestor;	/* on behalf of whom? */
	int transid;		/* VFS transaction id if valid */
	struct vmproc *vmp;	/* target address space */
	vir_bytes mem, len;	/* memory range */
	int wrflag;		/* must it be writable or not */
	int valid;		/* sanity check */
	int vfs_avail;		/* may vfs be called to satisfy this range? */
};

/* Arbitrary magic value for the 'valid' sanity-check field. */
#define VALID	0xc0ff1

static void handle_memory_continue(struct vmproc *vmp, message *m,
	void *arg, void *statearg);
static int handle_memory_step(struct hm_state *hmstate, int retry);
static void handle_memory_final(struct hm_state *state, int result);
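
/*
 * A memory request is driven by handle_memory_step().  When a page needs
 * VFS (e.g. a file-mapped page), the step suspends; handle_memory_continue()
 * resumes it once VFS has replied, and handle_memory_final() delivers the
 * result to the original caller.
 */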

/*===========================================================================*
 *				pf_errstr				     *
 *===========================================================================*/
char *pf_errstr(u32_t err)
{
	static char buf[100];

	snprintf(buf, sizeof(buf), "err 0x%lx ", (long)err);
	if(PFERR_NOPAGE(err)) strcat(buf, "nopage ");
	if(PFERR_PROT(err)) strcat(buf, "protection ");
	if(PFERR_WRITE(err)) strcat(buf, "write");
	if(PFERR_READ(err)) strcat(buf, "read");

	return buf;
}

static void pf_cont(struct vmproc *vmp, message *m, void *arg,
	void *statearg);
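
/*
 * Resolve one page fault for process 'ep' at address 'addr'.  A fault on an
 * unmapped address, or a write fault on a read-only mapping, kills the
 * process with SIGSEGV; a valid fault is resolved through map_pf(), possibly
 * asynchronously, after which the process is resumed.
 */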
static void handle_pagefault(endpoint_t ep, vir_bytes addr, u32_t err,
	int retry)
{
	struct vmproc *vmp;
	struct vir_region *region;
	vir_bytes offset;
	int s, result, io = 0;
	int p, wr = PFERR_WRITE(err);

	if(vm_isokendpt(ep, &p) != OK)
		panic("handle_pagefault: endpoint wrong: %d", ep);

	vmp = &vmproc[p];
	assert(vmp->vm_flags & VMF_INUSE);

	/* See if address is valid at all. */
	if(!(region = map_lookup(vmp, addr, NULL))) {
		if(PFERR_PROT(err)) {
			printf("VM: pagefault: SIGSEGV %d protected addr 0x%lx; %s\n",
				ep, addr, pf_errstr(err));
		} else {
			assert(PFERR_NOPAGE(err));
			printf("VM: pagefault: SIGSEGV %d bad addr 0x%lx; %s\n",
				ep, addr, pf_errstr(err));
			sys_diagctl_stacktrace(ep);
		}
		if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("do_pagefaults: sys_vmctl failed: %d", ep);
		return;
	}

	/* If process was writing, see if it's writable. */
	if(!(region->flags & VR_WRITABLE) && wr) {
		printf("VM: pagefault: SIGSEGV %d ro map 0x%lx %s\n",
			ep, addr, pf_errstr(err));
		if((s=sys_kill(vmp->vm_endpoint, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("do_pagefaults: sys_vmctl failed: %d", ep);
		return;
	}

	assert(addr >= region->vaddr);
	offset = addr - region->vaddr;

	/* Access is allowed; handle it. */
	if(retry) {
		result = map_pf(vmp, region, offset, wr, NULL, NULL, 0, &io);
		assert(result != SUSPEND);
	} else {
		struct pf_state state;

		state.ep = ep;
		state.vaddr = addr;
		state.err = err;

		result = map_pf(vmp, region, offset, wr, pf_cont,
			&state, sizeof(state), &io);
	}

	if(io)
		vmp->vm_major_page_fault++;
	else
		vmp->vm_minor_page_fault++;

	if(result == SUSPEND) {
		return;
	}

	if(result != OK) {
		printf("VM: pagefault: SIGSEGV %d pagefault not handled\n", ep);
		if((s=sys_kill(ep, SIGSEGV)) != OK)
			panic("sys_kill failed: %d", s);
		if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
			panic("do_pagefaults: sys_vmctl failed: %d", ep);
		return;
	}

	/* Pagefault is handled, so now reactivate the process. */
	if((s=sys_vmctl(ep, VMCTL_CLEAR_PAGEFAULT, 0 /*unused*/)) != OK)
		panic("do_pagefaults: sys_vmctl failed: %d", ep);
}
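
/*
 * Continuation for a page fault that suspended waiting on VFS: retry the
 * fault once, now that the blocking condition has been resolved.
 */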

static void pf_cont(struct vmproc *vmp, message *m,
	void *arg, void *statearg)
{
	struct pf_state *state = statearg;
	int p;

	if(vm_isokendpt(state->ep, &p) != OK) return;	/* signal */
	handle_pagefault(state->ep, state->vaddr, state->err, 1);
}
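
/*
 * Continuation for a suspended memory request: VFS has replied, so either
 * propagate a VFS error to the caller or retry the remaining range.
 */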

static void handle_memory_continue(struct vmproc *vmp, message *m,
	void *arg, void *statearg)
{
	int r;
	struct hm_state *state = statearg;

	assert(state->caller != NONE);
	assert(state->valid == VALID);

	if(m->VMV_RESULT != OK) {
		printf("VM: handle_memory_continue: vfs request failed\n");
		handle_memory_final(state, m->VMV_RESULT);
		return;
	}

	r = handle_memory_step(state, TRUE /*retry*/);

	assert(state->valid == VALID);

	if(r == SUSPEND) {
		return;
	}

	assert(state->valid == VALID);

	handle_memory_final(state, r);
}
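
/*
 * Deliver the final result of a memory request: the kernel is answered with
 * VMCTL_MEMREQ_REPLY, any other caller with an asynchronous reply message.
 */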

static void handle_memory_final(struct hm_state *state, int result)
{
	int r;

	assert(state->valid == VALID);

	if(state->caller == KERNEL) {
		if((r=sys_vmctl(state->requestor, VMCTL_MEMREQ_REPLY, result)) != OK)
			panic("handle_memory_final: sys_vmctl failed: %d", r);
	} else if(state->caller != NONE) {
		/* Send a reply msg */
		message msg;
		int flag = 0;

		memset(&msg, 0, sizeof(msg));
		msg.m_type = result;

		if(IS_VFS_FS_TRANSID(state->transid)) {
			assert(state->caller == VFS_PROC_NR);
			/* If a transaction ID was set, add it back to the reply. */
			msg.m_type = TRNS_ADD_ID(msg.m_type, state->transid);
		}

		/*
		 * Use AMF_NOREPLY only if there was a transaction ID, which
		 * signifies that VFS issued the request asynchronously.
		 */
		if(IS_VFS_FS_TRANSID(state->transid)) flag = AMF_NOREPLY;

		if(asynsend3(state->caller, &msg, flag) != OK) {
			panic("handle_memory_final: asynsend3 failed");
		}
	}

	assert(state->valid == VALID);

	/* fail fast if anyone tries to access this state again */
	memset(state, 0, sizeof(*state));
}

/*===========================================================================*
 *				do_pagefaults				     *
 *===========================================================================*/
void do_pagefaults(message *m)
{
	handle_pagefault(m->m_source, m->VPF_ADDR, m->VPF_FLAGS, 0);
}
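
/*
 * Make a memory range present (and writable, if 'wrflag' is set) in one
 * synchronous call.  No caller is registered, so the request must be
 * satisfiable without consulting VFS.
 */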

int handle_memory_once(struct vmproc *vmp, vir_bytes mem, vir_bytes len,
	int wrflag)
{
	int r;

	r = handle_memory_start(vmp, mem, len, wrflag, NONE, NONE, 0, 0);
	assert(r != SUSPEND);
	return r;
}
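
/*
 * Start handling a memory request for the given range.  If the request
 * suspends, the registered caller is replied to later; otherwise the result
 * is delivered immediately through handle_memory_final().
 */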

int handle_memory_start(struct vmproc *vmp, vir_bytes mem, vir_bytes len,
	int wrflag, endpoint_t caller, endpoint_t requestor, int transid,
	int vfs_avail)
{
	int r;
	struct hm_state state;
	vir_bytes o;

	/* Page-align the memory range. */
	if((o = mem % PAGE_SIZE)) {
		mem -= o;
		len += o;
	}

	len = roundup(len, PAGE_SIZE);

	state.vmp = vmp;
	state.mem = mem;
	state.len = len;
	state.wrflag = wrflag;
	state.requestor = requestor;
	state.caller = caller;
	state.transid = transid;
	state.valid = VALID;
	state.vfs_avail = vfs_avail;

	r = handle_memory_step(&state, FALSE /*retry*/);

	if(r == SUSPEND) {
		assert(caller != NONE);
	} else {
		handle_memory_final(&state, r);
	}

	return r;
}
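
/*
 * Drain the kernel's queue of pending memory requests, starting the
 * handling of each one on behalf of its requestor.
 */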

/*===========================================================================*
 *				  do_memory				     *
 *===========================================================================*/
void do_memory(void)
{
	endpoint_t who, who_s, requestor;
	vir_bytes mem, mem_s;
	vir_bytes len;
	int wrflag;

	while(1) {
		int p, r;
		struct vmproc *vmp;
		int transid = 0;
		int vfs_avail;

		r = sys_vmctl_get_memreq(&who, &mem, &len, &wrflag, &who_s,
			&mem_s, &requestor);
		if(r != VMPTYPE_CHECK)
			return;	/* no more pending requests */

		if(vm_isokendpt(who, &p) != OK)
			panic("do_memory: bad endpoint: %d", who);
		vmp = &vmproc[p];

		assert(!IS_VFS_FS_TRANSID(transid));

		/* is VFS blocked? */
		if(requestor == VFS_PROC_NR) vfs_avail = 0;
		else vfs_avail = 1;

		handle_memory_start(vmp, mem, len, wrflag,
			KERNEL, requestor, transid, vfs_avail);
	}
}
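
/*
 * Worker for memory requests: walk the range one page at a time and make
 * each page present, suspending into handle_memory_continue() whenever VFS
 * must be consulted for a file-mapped page.
 */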

static int handle_memory_step(struct hm_state *hmstate, int retry)
{
	struct vir_region *region;
	vir_bytes offset, length, sublen;
	int r;

	/* Memory and length must already be page-aligned. */
	assert(hmstate->valid == VALID);
	assert(!(hmstate->mem % VM_PAGE_SIZE));
	assert(!(hmstate->len % VM_PAGE_SIZE));

	while(hmstate->len > 0) {
		if(!(region = map_lookup(hmstate->vmp, hmstate->mem, NULL))) {
			map_printmap(hmstate->vmp);
			printf("VM: do_memory: memory doesn't exist\n");
			return EFAULT;
		} else if(!(region->flags & VR_WRITABLE) && hmstate->wrflag) {
			printf("VM: do_memory: write to unwritable map\n");
			return EFAULT;
		}

		assert(region->vaddr <= hmstate->mem);
		assert(!(region->vaddr % VM_PAGE_SIZE));
		offset = hmstate->mem - region->vaddr;
		length = hmstate->len;
		if (offset + length > region->length)
			length = region->length - offset;

		/*
		 * Handle one page at a time. While it seems beneficial to
		 * handle multiple pages in one go, the opposite is true:
		 * map_handle_memory will handle one page at a time anyway, and
		 * if we give it the whole range multiple times, it will have
		 * to recheck pages it already handled. In addition, in order
		 * to handle one-shot pages, we need to know whether we are
		 * retrying a single page, and that is not possible if this is
		 * hidden in map_handle_memory.
		 */
		while (length > 0) {
			sublen = VM_PAGE_SIZE;

			assert(sublen <= length);
			assert(offset + sublen <= region->length);

			/*
			 * Upon the second try for this range, do not allow
			 * calling into VFS again. This prevents eternal loops
			 * in case the FS messes up, and allows one-shot pages
			 * to be mapped in on the second call.
			 */
			if((region->def_memtype == &mem_type_mappedfile &&
			   (!hmstate->vfs_avail || retry)) ||
			   hmstate->caller == NONE) {
				r = map_handle_memory(hmstate->vmp, region,
					offset, sublen, hmstate->wrflag, NULL,
					NULL, 0);
				assert(r != SUSPEND);
			} else {
				r = map_handle_memory(hmstate->vmp, region,
					offset, sublen, hmstate->wrflag,
					handle_memory_continue, hmstate,
					sizeof(*hmstate));
			}

			if(r != OK) return r;

			hmstate->len -= sublen;
			hmstate->mem += sublen;
			offset += sublen;
			length -= sublen;
		}
	}

	return OK;
}
;