make vfs & filesystems use failable copying
[minix3.git] / kernel / arch / earm / memory.c
blob e8648897c6391e123997bf4cd1e826abbbcac2d7
#include "kernel/kernel.h"
#include "kernel/proc.h"
#include "kernel/vm.h"

#include <machine/vm.h>

#include <minix/type.h>
#include <minix/board.h>
#include <minix/syslib.h>
#include <minix/cpufeature.h>
#include <string.h>
#include <assert.h>
#include <signal.h>
#include <stdlib.h>

#include <machine/vm.h>

#include "arch_proto.h"
#include "kernel/proto.h"
#include "kernel/debug.h"
#include "bsp_timer.h"
#define HASPT(procptr) ((procptr)->p_seg.p_ttbr != 0)
static int nfreepdes = 0;
#define MAXFREEPDES	2
static int freepdes[MAXFREEPDES];

static u32_t phys_get32(phys_bytes v);

/* list of requested physical mappings */
static kern_phys_map *kern_phys_map_head;
void mem_clear_mapcache(void)
{
	int i;
	for(i = 0; i < nfreepdes; i++) {
		struct proc *ptproc = get_cpulocal_var(ptproc);
		int pde = freepdes[i];
		u32_t *ptv;
		assert(ptproc);
		ptv = ptproc->p_seg.p_ttbr_v;
		assert(ptv);
		ptv[pde] = 0;
	}
}
/* This function sets up a mapping from within the kernel's address
 * space to any other area of memory, either straight physical
 * memory (pr == NULL) or a process view of memory, in 1MB windows.
 * I.e., it maps in 1MB chunks of virtual (or physical) address space
 * to 1MB chunks of kernel virtual address space.
 *
 * It recognizes pr already being in memory as a special case (no
 * mapping required).
 *
 * The target (i.e. in-kernel) mapping area is one of the freepdes[]
 * slots that VM has earlier told the kernel are available. It is
 * identified by the 'free_pde_idx' parameter. This value can be chosen
 * freely by the caller, as long as it is in range (i.e. 0 or higher and
 * corresponds to a known freepde slot). It is up to the caller to keep
 * track of which freepdes are in use, and to determine which ones are
 * free to use.
 *
 * The logical number supplied by the caller is translated into an actual
 * pde number to be used, and a pointer to it (linear address) is returned
 * for actual use by phys_copy or memset.
 */
static phys_bytes createpde(
	const struct proc *pr,	/* Requested process, NULL for physical. */
	const phys_bytes linaddr,/* Address after segment translation. */
	phys_bytes *bytes,	/* Size of chunk, function may truncate it. */
	int free_pde_idx,	/* index of the free slot to use */
	int *changed		/* If mapping is made, this is set to 1. */
	)
{
	u32_t pdeval;
	phys_bytes offset;
	int pde;

	assert(free_pde_idx >= 0 && free_pde_idx < nfreepdes);
	pde = freepdes[free_pde_idx];
	assert(pde >= 0 && pde < 4096);

	if(pr && ((pr == get_cpulocal_var(ptproc)) || iskernelp(pr))) {
		/* Process memory is requested, and
		 * it's a process that is already in current page table, or
		 * the kernel, which is always there.
		 * Therefore linaddr is valid directly, with the requested
		 * size.
		 */
		return linaddr;
	}

	if(pr) {
		/* Requested address is in a process that is not currently
		 * accessible directly. Grab the PDE entry of that process'
		 * page table that corresponds to the requested address.
		 */
		assert(pr->p_seg.p_ttbr_v);
		pdeval = pr->p_seg.p_ttbr_v[ARM_VM_PDE(linaddr)];
	} else {
		/* Requested address is physical. Make up the PDE entry. */
		assert(linaddr >= PHYS_MEM_BEGIN && linaddr <= PHYS_MEM_END);

		/* memory */
		pdeval = (linaddr & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED
			| ARM_VM_SECTION_USER;
	}

	/* Write the pde value that we need into a pde that the kernel
	 * can access, into the currently loaded page table so it becomes
	 * visible.
	 */
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	if(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] != pdeval) {
		get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] = pdeval;
		*changed = 1;
	}

	/* Memory is now available, but only the 1MB window of virtual
	 * address space that we have mapped; calculate how much of
	 * the requested range is visible and return that in *bytes,
	 * if that is less than the requested range.
	 */
	offset = linaddr & ARM_VM_OFFSET_MASK_1MB; /* Offset in 1MB window. */
	*bytes = MIN(*bytes, ARM_SECTION_SIZE - offset);

	/* Return the linear address of the start of the new mapping. */
	return ARM_SECTION_SIZE*pde + offset;
}

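/*
 * Illustrative worked example (not part of the original source), assuming
 * ARM_SECTION_SIZE is 0x100000 (1MB) and ARM_VM_OFFSET_MASK_1MB is 0xFFFFF:
 * for linaddr = 0x80123456 and a requested *bytes of 0x200000, createpde()
 * computes offset = 0x23456 and truncates *bytes to 0x100000 - 0x23456 =
 * 0xDCBAA, i.e. only what is left of the current 1MB window. Callers such as
 * lin_lin_copy() below therefore loop, mapping one window per iteration,
 * until the whole range has been covered.
 */
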
/*===========================================================================*
 *				check_resumed_caller			     *
 *===========================================================================*/
static int check_resumed_caller(struct proc *caller)
{
	/* Returns the result from VM if caller was resumed, otherwise OK. */
	if (caller && (caller->p_misc_flags & MF_KCALL_RESUME)) {
		assert(caller->p_vmrequest.vmresult != VMSUSPEND);
		return caller->p_vmrequest.vmresult;
	}

	return OK;
}

/*===========================================================================*
 *				lin_lin_copy				     *
 *===========================================================================*/
static int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
	struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
{
	u32_t addr;
	proc_nr_t procslot;

	assert(get_cpulocal_var(ptproc));
	assert(get_cpulocal_var(proc_ptr));
	assert(read_ttbr0() == get_cpulocal_var(ptproc)->p_seg.p_ttbr);

	procslot = get_cpulocal_var(ptproc)->p_nr;

	assert(procslot >= 0 && procslot < ARM_VM_DIR_ENTRIES);

	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
	assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_VMINHIBIT));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_VMINHIBIT));

	while(bytes > 0) {
		phys_bytes srcptr, dstptr;
		vir_bytes chunk = bytes;
		int changed = 0;

#ifdef CONFIG_SMP
		unsigned cpu = cpuid;

		if (srcproc && GET_BIT(srcproc->p_stale_tlb, cpu)) {
			changed = 1;
			UNSET_BIT(srcproc->p_stale_tlb, cpu);
		}
		if (dstproc && GET_BIT(dstproc->p_stale_tlb, cpu)) {
			changed = 1;
			UNSET_BIT(dstproc->p_stale_tlb, cpu);
		}
#endif

		/* Set up 1MB ranges. */
		srcptr = createpde(srcproc, srclinaddr, &chunk, 0, &changed);
		dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
		if(changed) {
			reload_ttbr0();
		}
		/* Copy pages. */
		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);

		if(addr) {
			/* If addr is nonzero, a page fault was caught.
			 *
			 * phys_copy does all memory accesses word-aligned
			 * (rounded down), so pagefaults can occur at a lower
			 * address than the specified offsets. Compute the
			 * lower bounds for the sanity checks below.
			 */
			vir_bytes src_aligned = srcptr & ~0x3, dst_aligned = dstptr & ~0x3;

			if(addr >= src_aligned && addr < (srcptr + chunk)) {
				return EFAULT_SRC;
			}
			if(addr >= dst_aligned && addr < (dstptr + chunk)) {
				return EFAULT_DST;
			}

			panic("lin_lin_copy fault out of range");

			/* Not reached. */
			return EFAULT;
		}

		/* Update counter and addresses for next iteration, if any. */
		bytes -= chunk;
		srclinaddr += chunk;
		dstlinaddr += chunk;
	}

	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
	assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);

	return OK;
}

static u32_t phys_get32(phys_bytes addr)
{
	u32_t v;
	int r;

	if((r=lin_lin_copy(NULL, addr,
		proc_addr(SYSTEM), (phys_bytes) &v, sizeof(v))) != OK) {
		panic("lin_lin_copy for phys_get32 failed: %d", r);
	}

	return v;
}

/*===========================================================================*
 *				umap_virtual				     *
 *===========================================================================*/
phys_bytes umap_virtual(rp, seg, vir_addr, bytes)
register struct proc *rp;	/* pointer to proc table entry for process */
int seg;			/* T, D, or S segment */
vir_bytes vir_addr;		/* virtual address in bytes within the seg */
vir_bytes bytes;		/* # of bytes to be copied */
{
	phys_bytes phys = 0;

	if(vm_lookup(rp, vir_addr, &phys, NULL) != OK) {
		printf("SYSTEM:umap_virtual: vm_lookup of %s: seg 0x%x: 0x%lx failed\n", rp->p_name, seg, vir_addr);
		phys = 0;
	} else {
		if(phys == 0)
			panic("vm_lookup returned phys: 0x%lx", phys);
	}

	if(phys == 0) {
		printf("SYSTEM:umap_virtual: lookup failed\n");
		return 0;
	}

	/* Now make sure addresses are contiguous in physical memory
	 * so that the umap makes sense.
	 */
	if(bytes > 0 && vm_lookup_range(rp, vir_addr, NULL, bytes) != bytes) {
		printf("umap_virtual: %s: %lu at 0x%lx (vir 0x%lx) not contiguous\n",
			rp->p_name, bytes, vir_addr, vir_addr);
		return 0;
	}

	/* phys must be larger than 0 (or the caller will think the call
	 * failed), and address must not cross a page boundary.
	 */
	assert(phys);

	return phys;
}

/*===========================================================================*
 *				vm_lookup				     *
 *===========================================================================*/
int vm_lookup(const struct proc *proc, const vir_bytes virtual,
	phys_bytes *physical, u32_t *ptent)
{
	u32_t *root, *pt;
	int pde, pte;
	u32_t pde_v, pte_v;

	assert(proc);
	assert(physical);
	assert(!isemptyp(proc));
	assert(HASPT(proc));

	/* Retrieve page directory entry. */
	root = (u32_t *) (proc->p_seg.p_ttbr & ARM_TTBR_ADDR_MASK);
	assert(!((u32_t) root % ARM_PAGEDIR_SIZE));
	pde = ARM_VM_PDE(virtual);
	assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
	pde_v = phys_get32((u32_t) (root + pde));

	if(! ((pde_v & ARM_VM_PDE_PRESENT)
		|| (pde_v & ARM_VM_SECTION_PRESENT)
	     )) {
		return EFAULT;
	}

	if(pde_v & ARM_VM_SECTION) {
		*physical = pde_v & ARM_VM_SECTION_MASK;
		if(ptent) *ptent = pde_v;
		*physical += virtual & ARM_VM_OFFSET_MASK_1MB;
	} else {
		/* Retrieve page table entry. */
		pt = (u32_t *) (pde_v & ARM_VM_PDE_MASK);
		assert(!((u32_t) pt % ARM_PAGETABLE_SIZE));
		pte = ARM_VM_PTE(virtual);
		assert(pte >= 0 && pte < ARM_VM_PT_ENTRIES);
		pte_v = phys_get32((u32_t) (pt + pte));
		if(!(pte_v & ARM_VM_PTE_PRESENT)) {
			return EFAULT;
		}

		if(ptent) *ptent = pte_v;

		/* Actual address now known; retrieve it and add page offset. */
		*physical = pte_v & ARM_VM_PTE_MASK;
		*physical += virtual % ARM_PAGE_SIZE;
	}

	return OK;
}

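/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * helper that uses vm_lookup() to test whether a virtual address is mapped
 * for a process. The helper name is an assumption.
 */
#if 0
static int example_is_mapped(const struct proc *pr, vir_bytes va)
{
	phys_bytes pa;

	/* vm_lookup() returns OK and fills in pa if the address is mapped,
	 * EFAULT otherwise (for both section and page-table mappings).
	 */
	return vm_lookup(pr, va, &pa, NULL) == OK;
}
#endif
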
/*===========================================================================*
 *				vm_lookup_range				     *
 *===========================================================================*/
size_t vm_lookup_range(const struct proc *proc, vir_bytes vir_addr,
	phys_bytes *phys_addr, size_t bytes)
{
	/* Look up the physical address corresponding to linear virtual address
	 * 'vir_addr' for process 'proc'. Return the size of the range covered
	 * by contiguous physical memory starting from that address; this may
	 * be anywhere between 0 and 'bytes' inclusive. If the return value is
	 * nonzero, and 'phys_addr' is non-NULL, 'phys_addr' will be set to the
	 * base physical address of the range. 'vir_addr' and 'bytes' need not
	 * be page-aligned, but the caller must have verified that the given
	 * linear range is valid for the given process at all.
	 */
	phys_bytes phys, next_phys;
	size_t len;

	assert(proc);
	assert(bytes > 0);
	assert(HASPT(proc));

	/* Look up the first page. */
	if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
		return 0;

	if (phys_addr != NULL)
		*phys_addr = phys;

	len = ARM_PAGE_SIZE - (vir_addr % ARM_PAGE_SIZE);
	vir_addr += len;
	next_phys = phys + len;

	/* Look up any next pages and test physical contiguity. */
	while (len < bytes) {
		if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
			break;

		if (next_phys != phys)
			break;

		len += ARM_PAGE_SIZE;
		vir_addr += ARM_PAGE_SIZE;
		next_phys += ARM_PAGE_SIZE;
	}

	/* We might now have overshot the requested length somewhat. */
	return MIN(bytes, len);
}

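/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * helper that uses vm_lookup_range() to verify that a whole buffer is backed
 * by physically contiguous memory before handing out its physical address,
 * much like umap_virtual() above. The helper name and parameters are
 * assumptions.
 */
#if 0
static int example_get_contig_phys(const struct proc *pr, vir_bytes buf,
	size_t size, phys_bytes *phys)
{
	if (vm_lookup_range(pr, buf, phys, size) != size)
		return EFAULT;	/* unmapped, or not physically contiguous */

	return OK;
}
#endif
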
/*===========================================================================*
 *				vm_suspend				     *
 *===========================================================================*/
static void vm_suspend(struct proc *caller, const struct proc *target,
	const vir_bytes linaddr, const vir_bytes len, const int type,
	const int writeflag)
{
	/* This range is not OK for this process. Set parameters
	 * of the request and notify VM about the pending request.
	 */
	assert(!RTS_ISSET(caller, RTS_VMREQUEST));
	assert(!RTS_ISSET(target, RTS_VMREQUEST));

	RTS_SET(caller, RTS_VMREQUEST);

	caller->p_vmrequest.req_type = VMPTYPE_CHECK;
	caller->p_vmrequest.target = target->p_endpoint;
	caller->p_vmrequest.params.check.start = linaddr;
	caller->p_vmrequest.params.check.length = len;
	caller->p_vmrequest.params.check.writeflag = writeflag;
	caller->p_vmrequest.type = type;

	/* Connect caller on vmrequest wait queue. */
	if(!(caller->p_vmrequest.nextrequestor = vmrequest))
		if(OK != send_sig(VM_PROC_NR, SIGKMEM))
			panic("send_sig failed");
	vmrequest = caller;
}

/*===========================================================================*
 *				vm_check_range				     *
 *===========================================================================*/
int vm_check_range(struct proc *caller, struct proc *target,
	vir_bytes vir_addr, size_t bytes, int writeflag)
{
	/* Public interface to vm_suspend(), for use by kernel calls. On behalf
	 * of 'caller', call into VM to check linear virtual address range of
	 * process 'target', starting at 'vir_addr', for 'bytes' bytes. This
	 * function assumes that it will be called twice if VM returned an
	 * error the first time (since nothing has changed in that case), and
	 * will then return the error code resulting from the first call. Upon
	 * the first call, a non-success error code is returned as well.
	 */
	int r;

	if ((caller->p_misc_flags & MF_KCALL_RESUME) &&
			(r = caller->p_vmrequest.vmresult) != OK)
		return r;

	vm_suspend(caller, target, vir_addr, bytes, VMSTYPE_KERNELCALL,
		writeflag);

	return VMSUSPEND;
}

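/*
 * Illustrative sketch (not part of the original source): the retry pattern a
 * kernel call handler might use around vm_check_range(). The handler and
 * example_try_access() are hypothetical. On the first failure the handler
 * returns VMSUSPEND and is suspended while VM services the range; when the
 * call is resumed the access is retried, and if it fails again
 * vm_check_range() returns the error VM reported for the first request.
 */
#if 0
static int example_handler(struct proc *caller, struct proc *target,
	vir_bytes addr, size_t len)
{
	/* Try the actual memory access first (hypothetical helper). */
	if (example_try_access(target, addr, len) == OK)
		return OK;

	/* Ask VM to make the range available, or report why it cannot. */
	return vm_check_range(caller, target, addr, len, 1 /* writeflag */);
}
#endif
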
/*===========================================================================*
 *				delivermsg				     *
 *===========================================================================*/
void delivermsg(struct proc *rp)
{
	int r = OK;

	assert(rp->p_misc_flags & MF_DELIVERMSG);
	assert(rp->p_delivermsg.m_source != NONE);

	if (copy_msg_to_user(&rp->p_delivermsg,
				(message *) rp->p_delivermsg_vir)) {
		printf("WARNING wrong user pointer 0x%08lx from "
				"process %s / %d\n",
				rp->p_delivermsg_vir,
				rp->p_name,
				rp->p_endpoint);
		r = EFAULT;
	}

	/* Indicate message has been delivered; address is 'used'. */
	rp->p_delivermsg.m_source = NONE;
	rp->p_misc_flags &= ~MF_DELIVERMSG;

	if(!(rp->p_misc_flags & MF_CONTEXT_SET)) {
		rp->p_reg.retreg = r;
	}
}

/*===========================================================================*
 *				vm_memset				     *
 *===========================================================================*/
int vm_memset(struct proc* caller, endpoint_t who, phys_bytes ph, int c,
	phys_bytes count)
{
	u32_t pattern;
	struct proc *whoptr = NULL;
	phys_bytes cur_ph = ph;
	phys_bytes left = count;
	phys_bytes ptr, chunk, pfa = 0;
	int new_ttbr, r = OK;

	if ((r = check_resumed_caller(caller)) != OK)
		return r;

	/* NONE for physical, otherwise virtual */
	if (who != NONE && !(whoptr = endpoint_lookup(who)))
		return ESRCH;

	c &= 0xFF;
	pattern = c | (c << 8) | (c << 16) | (c << 24);

	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	assert(!catch_pagefaults);
	catch_pagefaults = 1;

	/* We can memset as many bytes as we have remaining,
	 * or as many as remain in the 1MB chunk we mapped in.
	 */
	while (left > 0) {
		new_ttbr = 0;
		chunk = left;
		ptr = createpde(whoptr, cur_ph, &chunk, 0, &new_ttbr);

		if (new_ttbr) {
			reload_ttbr0();
		}
		/* If a page fault happens, pfa is nonzero */
		if ((pfa = phys_memset(ptr, pattern, chunk))) {

			/* If a process pagefaults, VM may help out */
			if (whoptr) {
				vm_suspend(caller, whoptr, ph, count,
						   VMSTYPE_KERNELCALL, 1);
				assert(catch_pagefaults);
				catch_pagefaults = 0;
				return VMSUSPEND;
			}

			/* Pagefault when phys copying ?! */
			panic("vm_memset: pf %lx addr=%lx len=%lu\n",
						pfa, ptr, chunk);
		}

		cur_ph += chunk;
		left -= chunk;
	}

	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	assert(catch_pagefaults);
	catch_pagefaults = 0;

	return OK;
}

/*===========================================================================*
 *				virtual_copy_f				     *
 *===========================================================================*/
int virtual_copy_f(caller, src_addr, dst_addr, bytes, vmcheck)
struct proc * caller;
struct vir_addr *src_addr;	/* source virtual address */
struct vir_addr *dst_addr;	/* destination virtual address */
vir_bytes bytes;		/* # of bytes to copy */
int vmcheck;			/* if nonzero, can return VMSUSPEND */
{
	/* Copy bytes from virtual address src_addr to virtual address dst_addr. */
	struct vir_addr *vir_addr[2];	/* virtual source and destination address */
	int i, r;
	struct proc *procs[2];

	assert((vmcheck && caller) || (!vmcheck && !caller));

	/* Check copy count. */
	if (bytes <= 0) return(EDOM);

	/* Do some more checks and map virtual addresses to physical addresses. */
	vir_addr[_SRC_] = src_addr;
	vir_addr[_DST_] = dst_addr;

	for (i=_SRC_; i<=_DST_; i++) {
		endpoint_t proc_e = vir_addr[i]->proc_nr_e;
		int proc_nr;
		struct proc *p;

		if(proc_e == NONE) {
			p = NULL;
		} else {
			if(!isokendpt(proc_e, &proc_nr)) {
				printf("virtual_copy: no reasonable endpoint\n");
				return ESRCH;
			}
			p = proc_addr(proc_nr);
		}

		procs[i] = p;
	}

	if ((r = check_resumed_caller(caller)) != OK)
		return r;

	if((r=lin_lin_copy(procs[_SRC_], vir_addr[_SRC_]->offset,
		procs[_DST_], vir_addr[_DST_]->offset, bytes)) != OK) {
		int writeflag;
		struct proc *target = NULL;
		phys_bytes lin;
		if(r != EFAULT_SRC && r != EFAULT_DST)
			panic("lin_lin_copy failed: %d", r);
		if(!vmcheck || !caller) {
			return r;
		}

		if(r == EFAULT_SRC) {
			lin = vir_addr[_SRC_]->offset;
			target = procs[_SRC_];
			writeflag = 0;
		} else if(r == EFAULT_DST) {
			lin = vir_addr[_DST_]->offset;
			target = procs[_DST_];
			writeflag = 1;
		} else {
			panic("r strange: %d", r);
		}

		assert(caller);
		assert(target);

		vm_suspend(caller, target, lin, bytes, VMSTYPE_KERNELCALL, writeflag);
		return VMSUSPEND;
	}

	return OK;
}

/*===========================================================================*
 *				data_copy				     *
 *===========================================================================*/
int data_copy(const endpoint_t from_proc, const vir_bytes from_addr,
	const endpoint_t to_proc, const vir_bytes to_addr,
	size_t bytes)
{
	struct vir_addr src, dst;

	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;
	assert(src.proc_nr_e != NONE);
	assert(dst.proc_nr_e != NONE);

	return virtual_copy(&src, &dst, bytes);
}

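/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * helper built on data_copy() that moves one message-sized buffer between
 * two endpoints. The helper name and its arguments are assumptions; on a
 * fault, data_copy() propagates the error from lin_lin_copy() (e.g.
 * EFAULT_SRC or EFAULT_DST) instead of OK.
 */
#if 0
static int example_copy_message(endpoint_t src_e, vir_bytes src_addr,
	endpoint_t dst_e, vir_bytes dst_addr)
{
	return data_copy(src_e, src_addr, dst_e, dst_addr, sizeof(message));
}
#endif
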
/*===========================================================================*
 *				data_copy_vmcheck			     *
 *===========================================================================*/
int data_copy_vmcheck(struct proc * caller,
	const endpoint_t from_proc, const vir_bytes from_addr,
	const endpoint_t to_proc, const vir_bytes to_addr,
	size_t bytes)
{
	struct vir_addr src, dst;

	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;
	assert(src.proc_nr_e != NONE);
	assert(dst.proc_nr_e != NONE);

	return virtual_copy_vmcheck(caller, &src, &dst, bytes);
}

void memory_init(void)
{
	assert(nfreepdes == 0);

	freepdes[nfreepdes++] = kinfo.freepde_start++;
	freepdes[nfreepdes++] = kinfo.freepde_start++;

	assert(kinfo.freepde_start < ARM_VM_DIR_ENTRIES);
	assert(nfreepdes == 2);
	assert(nfreepdes <= MAXFREEPDES);
}

/*===========================================================================*
 *				arch_proc_init				     *
 *===========================================================================*/
void arch_proc_init(struct proc *pr, const u32_t ip, const u32_t sp,
	const u32_t ps_str, char *name)
{
	arch_proc_reset(pr);
	strcpy(pr->p_name, name);

	/* set custom state we know */
	pr->p_reg.pc = ip;
	pr->p_reg.sp = sp;
	pr->p_reg.retreg = ps_str; /* a.k.a. r0 */
}

static int usermapped_glo_index = -1,
	usermapped_index = -1, first_um_idx = -1;

/* defined in kernel.lds */
extern char usermapped_start, usermapped_end, usermapped_nonglo_start;

int arch_phys_map(const int index,
			phys_bytes *addr,
			phys_bytes *len,
			int *flags)
{
	static int first = 1;
	kern_phys_map *phys_maps;

	int freeidx = 0;
	u32_t glo_len = (u32_t) &usermapped_nonglo_start -
			(u32_t) &usermapped_start;

	if(first) {
		memset(&minix_kerninfo, 0, sizeof(minix_kerninfo));
		if(glo_len > 0) {
			usermapped_glo_index = freeidx++;
		}

		usermapped_index = freeidx++;
		first_um_idx = usermapped_index;
		if(usermapped_glo_index != -1)
			first_um_idx = usermapped_glo_index;
		first = 0;

		/* loop over the maps and index them */
		phys_maps = kern_phys_map_head;
		while(phys_maps != NULL){
			phys_maps->index = freeidx++;
			phys_maps = phys_maps->next;
		}

	}

	if(index == usermapped_glo_index) {
		*addr = vir2phys(&usermapped_start);
		*len = glo_len;
		*flags = VMMF_USER | VMMF_GLO;
		return OK;
	}
	else if(index == usermapped_index) {
		*addr = vir2phys(&usermapped_nonglo_start);
		*len = (u32_t) &usermapped_end -
			(u32_t) &usermapped_nonglo_start;
		*flags = VMMF_USER;
		return OK;
	}

	/* if all this fails, loop over the maps */
	phys_maps = kern_phys_map_head;
	while(phys_maps != NULL){
		if(phys_maps->index == index){
			*addr = phys_maps->addr;
			*len = phys_maps->size;
			*flags = phys_maps->vm_flags;
			return OK;
		}
		phys_maps = phys_maps->next;
	}

	return EINVAL;
}

int arch_phys_map_reply(const int index, const vir_bytes addr)
{
	kern_phys_map *phys_maps;

	if(index == first_um_idx) {
		u32_t usermapped_offset;
		assert(addr > (u32_t) &usermapped_start);
		usermapped_offset = addr - (u32_t) &usermapped_start;
#define FIXEDPTR(ptr) (void *) ((u32_t)ptr + usermapped_offset)
#define FIXPTR(ptr) ptr = FIXEDPTR(ptr)
#define ASSIGN(minixstruct) minix_kerninfo.minixstruct = FIXEDPTR(&minixstruct)
		ASSIGN(kinfo);
		ASSIGN(machine);
		ASSIGN(kmessages);
		ASSIGN(loadinfo);

		/* adjust the pointers of the functions and the struct
		 * itself to the user-accessible mapping
		 */
		minix_kerninfo.kerninfo_magic = KERNINFO_MAGIC;
		minix_kerninfo.minix_feature_flags = minix_feature_flags;
		minix_kerninfo_user = (vir_bytes) FIXEDPTR(&minix_kerninfo);
		return OK;
	}

	if (index == usermapped_index) {
		return OK;
	}

	/* if all this fails, loop over the maps */
	phys_maps = kern_phys_map_head;
	while(phys_maps != NULL){
		if(phys_maps->index == index){
			assert(phys_maps->cb != NULL);
			/* only record the vir addr here; the callback
			 * will be invoked in arch_enable_paging()
			 */
			phys_maps->vir = addr;
			return OK;
		}
		phys_maps = phys_maps->next;
	}

	return EINVAL;
}

int arch_enable_paging(struct proc * caller)
{
	kern_phys_map *phys_maps;
	assert(caller->p_seg.p_ttbr);

	/* load caller's page table */
	switch_address_space(caller);

	/* We have now switched address spaces and the mappings are
	 * valid. We can now remap previous mappings. This is not a
	 * good time to do printf as the initial mapping is gone and
	 * the new mapping is not in place yet. */
	phys_maps = kern_phys_map_head;
	while(phys_maps != NULL){
		assert(phys_maps->cb != NULL);
		phys_maps->cb(phys_maps->id, phys_maps->vir);
		phys_maps = phys_maps->next;
	}

	return OK;
}

void release_address_space(struct proc *pr)
{
	pr->p_seg.p_ttbr_v = NULL;
	barrier();
}

/*
 * Request a physical mapping
 */
int kern_req_phys_map( phys_bytes base_address, vir_bytes io_size,
		       int vm_flags, kern_phys_map * priv,
		       kern_phys_map_mapped cb, vir_bytes id)
{
	/* Assign the values to the given struct and add priv
	   to the list */
	assert(base_address != 0);
	assert(io_size % ARM_PAGE_SIZE == 0);
	assert(cb != NULL);

	priv->addr = base_address;
	priv->size = io_size;
	priv->vm_flags = vm_flags;
	priv->cb = cb;
	priv->id = id;
	priv->index = -1;
	priv->next = NULL;

	if (kern_phys_map_head == NULL){
		/* this is the first item of the list */
		kern_phys_map_head = priv;
		kern_phys_map_head->next = NULL;
	} else {
		/* insert the item at the head, but first keep track
		   of the current head by putting it in next */
		priv->next = kern_phys_map_head;
		/* replace the head */
		kern_phys_map_head = priv;
	}
	return 0;
}

/*
 * Callback implementation where the id given to the
 * kern_phys_map is a pointer to the I/O map base address.
 * This implementation will just change that base address
 * once the area is remapped.
 */
int kern_phys_map_mapped_ptr(vir_bytes id, phys_bytes address){
	*((vir_bytes*)id) = address;
	return 0;
}

/*
 * Request a physical mapping and put the result in the given ptr.
 * Note that ptr will only be valid once the callback has happened.
 */
int kern_phys_map_ptr(
	phys_bytes base_address,
	vir_bytes io_size,
	int vm_flags,
	kern_phys_map * priv,
	vir_bytes ptr)
{
	return kern_req_phys_map(base_address, io_size, vm_flags, priv,
		kern_phys_map_mapped_ptr, ptr);
}
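
/*
 * Illustrative sketch (not part of the original source): how a hypothetical
 * in-kernel user of this API might request a permanently mapped device
 * register window. The physical base address, the variable names and the
 * vm_flags value (0) are assumptions; real callers pass whatever flags the
 * mapping needs. kern_phys_map_ptr() only records the request; the virtual
 * address in example_regs_base becomes valid once arch_enable_paging() has
 * invoked the kern_phys_map_mapped_ptr() callback for this entry.
 */
#if 0
static vir_bytes example_regs_base;	/* filled in by the callback */
static kern_phys_map example_map;

static void example_request_device_window(void)
{
	kern_phys_map_ptr(0x48000000 /* hypothetical device base */,
		ARM_PAGE_SIZE, 0 /* vm_flags */, &example_map,
		(vir_bytes) &example_regs_base);
}
#endif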