/* minix/servers/vm/pagetable.c */
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/cpufeature.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <fcntl.h>

#include "proto.h"
#include "glo.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"

static int vm_self_pages;

/* PDE used to map in kernel, kernel physical address. */
#define MAX_PAGEDIR_PDES 5
static struct pdm {
	int pdeno;
	u32_t val;
	phys_bytes phys;
	u32_t *page_directories;
} pagedir_mappings[MAX_PAGEDIR_PDES];

static multiboot_module_t *kern_mb_mod = NULL;
static size_t kern_size = 0;
static int kern_start_pde = -1;

/* big page size available in hardware? */
static int bigpage_ok = 1;

/* Our process table entry. */
struct vmproc *vmprocess = &vmproc[VM_PROC_NR];

/* Spare memory, ready to go after initialization, to avoid a
 * circular dependency on allocating memory and writing it into VM's
 * page table.
 */
#if SANITYCHECKS
#define SPAREPAGES 200
#define STATIC_SPAREPAGES 190
#else
#ifdef __arm__
# define SPAREPAGES 150
# define STATIC_SPAREPAGES 140
#else
# define SPAREPAGES 20
# define STATIC_SPAREPAGES 15
#endif /* __arm__ */
#endif

#ifdef __i386__
static u32_t global_bit = 0;
#endif

#define SPAREPAGEDIRS 1
#define STATIC_SPAREPAGEDIRS 1

int missing_sparedirs = SPAREPAGEDIRS;
static struct {
	void *pagedir;
	phys_bytes phys;
} sparepagedirs[SPAREPAGEDIRS];

extern char _end;
#define is_staticaddr(v) ((vir_bytes) (v) < (vir_bytes) &_end)

#define MAX_KERNMAPPINGS 10
static struct {
	phys_bytes phys_addr;	/* Physical addr. */
	phys_bytes len;		/* Length in bytes. */
	vir_bytes vir_addr;	/* Offset in page table. */
	int flags;
} kern_mappings[MAX_KERNMAPPINGS];
int kernmappings = 0;

/* Clicks must be pages, as
 * - they must be page aligned to map them
 * - they must be a multiple of the page size
 * - it's inconvenient to have them bigger than pages, because we often want
 *   just one page
 * May as well require them to be equal then.
 */
#if CLICK_SIZE != VM_PAGE_SIZE
#error CLICK_SIZE must be page size.
#endif

static void *spare_pagequeue;
static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES]
	__aligned(VM_PAGE_SIZE);

#if defined(__arm__)
static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE);
#endif

#if SANITYCHECKS
/*===========================================================================*
 *				pt_sanitycheck				     *
 *===========================================================================*/
void pt_sanitycheck(pt_t *pt, const char *file, int line)
{
/* Basic pt sanity check. */
	int slot;

	MYASSERT(pt);
	MYASSERT(pt->pt_dir);
	MYASSERT(pt->pt_dir_phys);

	for(slot = 0; slot < ELEMENTS(vmproc); slot++) {
		if(pt == &vmproc[slot].vm_pt)
			break;
	}

	if(slot >= ELEMENTS(vmproc)) {
		panic("pt_sanitycheck: passed pt not in any proc");
	}

	MYASSERT(usedpages_add(pt->pt_dir_phys, VM_PAGE_SIZE) == OK);
}
#endif

/*===========================================================================*
 *				findhole				     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
/* Find a space in the virtual address space of VM. */
	u32_t curv;
	int pde = 0, try_restart;
	static u32_t lastv = 0;
	pt_t *pt = &vmprocess->vm_pt;
	vir_bytes vmin, vmax;
	u32_t holev = NO_MEM;
	int holesize = -1;

	vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
	vmin += 1024*1024*1024;	/* reserve 1GB virtual address space for VM heap */
	vmin &= ARCH_VM_ADDR_MASK;
	vmax = vmin + 100 * 1024 * 1024; /* allow 100MB of address space for VM */

	/* Input sanity check. */
	assert(vmin + VM_PAGE_SIZE >= vmin);
	assert(vmax >= vmin + VM_PAGE_SIZE);
	assert((vmin % VM_PAGE_SIZE) == 0);
	assert((vmax % VM_PAGE_SIZE) == 0);
	assert(pages > 0);

	curv = lastv;
	if(curv < vmin || curv >= vmax)
		curv = vmin;

	try_restart = 1;

	/* Start looking for a free page starting at vmin. */
	while(curv < vmax) {
		int pte;

		assert(curv >= vmin);
		assert(curv < vmax);

		pde = ARCH_VM_PDE(curv);
		pte = ARCH_VM_PTE(curv);

		if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
		   (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			/* there is a page here - so keep looking for holes */
			holev = NO_MEM;
			holesize = 0;
		} else {
			/* there is no page here - so we have a hole, a bigger
			 * one if we already had one
			 */
			if(holev == NO_MEM) {
				holev = curv;
				holesize = 1;
			} else holesize++;

			assert(holesize > 0);
			assert(holesize <= pages);

			/* if it's big enough, return it */
			if(holesize == pages) {
				lastv = curv + VM_PAGE_SIZE;
				return holev;
			}
		}

		curv += VM_PAGE_SIZE;

		/* if we reached the limit, start scanning from the beginning if
		 * we haven't looked there yet
		 */
		if(curv >= vmax && try_restart) {
			try_restart = 0;
			curv = vmin;
		}
	}

	printf("VM: out of virtual address space in vm\n");

	return NO_MEM;
}

/*===========================================================================*
 *				vm_freepages				     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
	assert(!(vir % VM_PAGE_SIZE));

	if(is_staticaddr(vir)) {
		printf("VM: not freeing static page\n");
		return;
	}

	if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
		MAP_NONE, pages*VM_PAGE_SIZE, 0,
		WMF_OVERWRITE | WMF_FREE) != OK)
		panic("vm_freepages: pt_writemap failed");

	vm_self_pages--;

#if SANITYCHECKS
	/* If SANITYCHECKS are on, flush tlb so accessing freed pages is
	 * always trapped, also if not in tlb.
	 */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
#endif
}

/*===========================================================================*
 *				vm_getsparepage				     *
 *===========================================================================*/
static void *vm_getsparepage(phys_bytes *phys)
{
	void *ptr;
	if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) {
		printf("vm_getsparepage: no spare found\n");
		return NULL;
	}
	assert(ptr);
	return ptr;
}

/*===========================================================================*
 *				vm_getsparepagedir			     *
 *===========================================================================*/
static void *vm_getsparepagedir(phys_bytes *phys)
{
	int s;
	assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		if(sparepagedirs[s].pagedir) {
			void *sp;
			sp = sparepagedirs[s].pagedir;
			*phys = sparepagedirs[s].phys;
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
			return sp;
		}
	}

	return NULL;
}

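/* Map 'pages' pages of physical memory, starting at physical address 'p',
 * into VM's own virtual address space; return the virtual address, or NULL
 * if no hole was found or the mapping could not be written.
 */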
void *vm_mappages(phys_bytes p, int pages)
{
	vir_bytes loc;
	int r;
	pt_t *pt = &vmprocess->vm_pt;

	/* Where in our virtual address space can we put it? */
	loc = findhole(pages);
	if(loc == NO_MEM) {
		printf("vm_mappages: findhole failed\n");
		return NULL;
	}

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
		| ARM_VM_PTE_CACHED
#endif
		, 0)) != OK) {
		printf("vm_mappages writemap failed\n");
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	assert(loc);

	return (void *) loc;
}

static int pt_init_done;

/*===========================================================================*
 *				vm_allocpage				     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	static int level = 0;
	void *ret;
	u32_t mem_flags = 0;

	assert(reason >= 0 && reason < VMP_CATEGORIES);

	assert(pages > 0);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if((level > 1) || !pt_init_done) {
		void *s;

		if(pages == 1) s=vm_getsparepage(phys);
		else if(pages == 4) s=vm_getsparepagedir(phys);
		else panic("%d pages", pages);

		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		if(!is_staticaddr(s)) vm_self_pages++;
		return s;
	}

#if defined(__arm__)
	if (reason == VMP_PAGEDIR) {
		mem_flags |= PAF_ALIGN16K;
	}
#endif

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	if(!(ret = vm_mappages(*phys, pages))) {
		level--;
		printf("VM: vm_allocpage: vm_mappages failed\n");
		return NULL;
	}

	level--;
	vm_self_pages++;

	return ret;
}

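/* Convenience wrapper: allocate a single page for VM's own use. */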
void *vm_allocpage(phys_bytes *phys, int reason)
{
	return vm_allocpages(phys, reason, 1);
}

/*===========================================================================*
 *				vm_pagelock				     *
 *===========================================================================*/
void vm_pagelock(void *vir, int lockflag)
{
/* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
	vir_bytes m = (vir_bytes) vir;
	int r;
	u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER;
	pt_t *pt;

	pt = &vmprocess->vm_pt;

	assert(!(m % VM_PAGE_SIZE));

	if(!lockflag)
		flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
	else
		flags |= ARCH_VM_PTE_RO;

	flags |= ARM_VM_PTE_CACHED;
#endif

	/* Update flags. */
	if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE,
		flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
		panic("vm_lockpage: pt_writemap failed");
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	return;
}

/*===========================================================================*
 *				vm_addrok				     *
 *===========================================================================*/
int vm_addrok(void *vir, int writeflag)
{
	pt_t *pt = &vmprocess->vm_pt;
	int pde, pte;
	vir_bytes v = (vir_bytes) vir;

	pde = ARCH_VM_PDE(v);
	pte = ARCH_VM_PTE(v);

	if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
		printf("addr not ok: missing pde %d\n", pde);
		return 0;
	}

#if defined(__i386__)
	if(writeflag &&
		!(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) {
		printf("addr not ok: pde %d present but pde unwritable\n", pde);
		return 0;
	}
#elif defined(__arm__)
	if(writeflag &&
		(pt->pt_dir[pde] & ARCH_VM_PTE_RO)) {
		printf("addr not ok: pde %d present but pde unwritable\n", pde);
		return 0;
	}
#endif
	if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
		printf("addr not ok: missing pde %d / pte %d\n",
			pde, pte);
		return 0;
	}

#if defined(__i386__)
	if(writeflag &&
		!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
			pde, pte);
#elif defined(__arm__)
	if(writeflag &&
		(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
			pde, pte);
#endif
		return 0;
	}

	return 1;
}

/*===========================================================================*
 *				pt_ptalloc				     *
 *===========================================================================*/
static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
{
/* Allocate a page table and write its address into the page directory. */
	int i;
	phys_bytes pt_phys;
	u32_t *p;

	/* Argument must make sense. */
	assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);
	assert(!(flags & ~(PTF_ALLFLAGS)));

	/* We don't expect to overwrite page directory entry, nor
	 * storage for the page table.
	 */
	assert(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT));
	assert(!pt->pt_pt[pde]);

	/* Get storage for the page table. The allocation call may in fact
	 * recursively create the directory entry as a side effect. In that
	 * case, we free the newly allocated page and do nothing else.
	 */
	if (!(p = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
		return ENOMEM;
	if (pt->pt_pt[pde]) {
		vm_freepages((vir_bytes) p, 1);
		assert(pt->pt_pt[pde]);
		return OK;
	}
	pt->pt_pt[pde] = p;

	for(i = 0; i < ARCH_VM_PT_ENTRIES; i++)
		pt->pt_pt[pde][i] = 0;	/* Empty entry. */

	/* Make page directory entry.
	 * The PDE is always 'present,' 'writable,' and 'user accessible,'
	 * relying on the PTE for protection.
	 */
#if defined(__i386__)
	pt->pt_dir[pde] = (pt_phys & ARCH_VM_ADDR_MASK) | flags
		| ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
#elif defined(__arm__)
	pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
		| ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif

	return OK;
}

/*===========================================================================*
 *			    pt_ptalloc_in_range				     *
 *===========================================================================*/
int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
	u32_t flags, int verify)
{
/* Allocate all the page tables in the range specified. */
	int pde, first_pde, last_pde;

	first_pde = ARCH_VM_PDE(start);
	last_pde = ARCH_VM_PDE(end-1);

	assert(first_pde >= 0);
	assert(last_pde < ARCH_VM_DIR_ENTRIES);

	/* Scan all page-directory entries in the range. */
	for(pde = first_pde; pde <= last_pde; pde++) {
		assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			int r;
			if(verify) {
				printf("pt_ptalloc_in_range: no pde %d\n", pde);
				return EFAULT;
			}
			assert(!pt->pt_dir[pde]);
			if((r=pt_ptalloc(pt, pde, flags)) != OK) {
				/* Couldn't do (complete) mapping.
				 * Don't bother freeing any previously
				 * allocated page tables, they're
				 * still writable, don't point to nonsense,
				 * and pt_ptalloc leaves the directory
				 * and other data in a consistent state.
				 */
				return r;
			}
			assert(pt->pt_pt[pde]);
		}
		assert(pt->pt_pt[pde]);
		assert(pt->pt_dir[pde]);
		assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
	}

	return OK;
}

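/* Return a static, human-readable string describing the flags of a page
 * table entry; used by the verification path in pt_writemap().
 */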
static const char *ptestr(u32_t pte)
{
#define FLAG(constant, name) {						\
	if(pte & (constant)) { strcat(str, name); strcat(str, " "); }	\
}

	static char str[30];
	if(!(pte & ARCH_VM_PTE_PRESENT)) {
		return "not present";
	}
	str[0] = '\0';
#if defined(__i386__)
	FLAG(ARCH_VM_PTE_RW, "W");
#elif defined(__arm__)
	if(pte & ARCH_VM_PTE_RO) {
		strcat(str, "R ");
	} else {
		strcat(str, "W ");
	}
#endif
	FLAG(ARCH_VM_PTE_USER, "U");
#if defined(__i386__)
	FLAG(I386_VM_PWT, "PWT");
	FLAG(I386_VM_PCD, "PCD");
	FLAG(I386_VM_ACC, "ACC");
	FLAG(I386_VM_DIRTY, "DIRTY");
	FLAG(I386_VM_PS, "PS");
	FLAG(I386_VM_GLOBAL, "G");
	FLAG(I386_VM_PTAVAIL1, "AV1");
	FLAG(I386_VM_PTAVAIL2, "AV2");
	FLAG(I386_VM_PTAVAIL3, "AV3");
#elif defined(__arm__)
	FLAG(ARM_VM_PTE_SUPER, "S");
	FLAG(ARM_VM_PTE_S, "SH");
	FLAG(ARM_VM_PTE_WB, "WB");
	FLAG(ARM_VM_PTE_WT, "WT");
#endif

	return str;
}

/*===========================================================================*
 *			     pt_map_in_range				     *
 *===========================================================================*/
int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
	vir_bytes start, vir_bytes end)
{
/* Transfer all the mappings from the pt of the source process to the pt of
 * the destination process in the range specified.
 */
	int pde, pte;
	vir_bytes viraddr;
	pt_t *pt, *dst_pt;

	pt = &src_vmp->vm_pt;
	dst_pt = &dst_vmp->vm_pt;

	end = end ? end : VM_DATATOP;
	assert(start % VM_PAGE_SIZE == 0);
	assert(end % VM_PAGE_SIZE == 0);

	assert( /* ARCH_VM_PDE(start) >= 0 && */ start <= end);
	assert(ARCH_VM_PDE(end) < ARCH_VM_DIR_ENTRIES);

#if LU_DEBUG
	printf("VM: pt_map_in_range: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
	printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
		start, ARCH_VM_PDE(start), ARCH_VM_PTE(start),
		end, ARCH_VM_PDE(end), ARCH_VM_PTE(end));
#endif

	/* Scan all page-table entries in the range. */
	for(viraddr = start; viraddr <= end; viraddr += VM_PAGE_SIZE) {
		pde = ARCH_VM_PDE(viraddr);
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			if(viraddr == VM_DATATOP) break;
			continue;
		}
		pte = ARCH_VM_PTE(viraddr);
		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			if(viraddr == VM_DATATOP) break;
			continue;
		}

		/* Transfer the mapping. */
		dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];

		if(viraddr == VM_DATATOP) break;
	}

	return OK;
}

/*===========================================================================*
 *				pt_ptmap				     *
 *===========================================================================*/
int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
/* Transfer mappings to page dir and page tables from source process and
 * destination process. Make sure all the mappings are above the stack, not
 * to corrupt valid mappings in the data segment of the destination process.
 */
	int pde, r;
	phys_bytes physaddr;
	vir_bytes viraddr;
	pt_t *pt;

	pt = &src_vmp->vm_pt;

#if LU_DEBUG
	printf("VM: pt_ptmap: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif

	/* Transfer mapping to the page directory. */
	viraddr = (vir_bytes) pt->pt_dir;
	physaddr = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#if defined(__i386__)
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
#elif defined(__arm__)
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER |
		ARM_VM_PTE_CACHED,
#endif
		WMF_OVERWRITE)) != OK) {
		return r;
	}
#if LU_DEBUG
	printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
		viraddr, physaddr);
#endif

	/* Scan all non-reserved page-directory entries. */
	for(pde=0; pde < ARCH_VM_DIR_ENTRIES; pde++) {
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			continue;
		}

		/* Transfer mapping to the page table. */
		viraddr = (vir_bytes) pt->pt_pt[pde];
#if defined(__i386__)
		physaddr = pt->pt_dir[pde] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK;
#endif
		if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
			ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#ifdef __arm__
			| ARM_VM_PTE_CACHED
#endif
			,
			WMF_OVERWRITE)) != OK) {
			return r;
		}
	}

	return OK;
}

void pt_clearmapcache(void)
{
	/* Make sure kernel will invalidate tlb when using current
	 * pagetable (i.e. vm's) to make new mappings before new cr3
	 * is loaded.
	 */
	if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK)
		panic("VMCTL_CLEARMAPCACHE failed");
}

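/* Return nonzero iff the page at virtual address 'v' in 'vmp' is mapped
 * writable; the page directory and page table entries must be present.
 */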
int pt_writable(struct vmproc *vmp, vir_bytes v)
{
	u32_t entry;
	pt_t *pt = &vmp->vm_pt;
	assert(!(v % VM_PAGE_SIZE));
	int pde = ARCH_VM_PDE(v);
	int pte = ARCH_VM_PTE(v);

	assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
	assert(pt->pt_pt[pde]);

	entry = pt->pt_pt[pde][pte];

#if defined(__i386__)
	return((entry & PTF_WRITE) ? 1 : 0);
#elif defined(__arm__)
	return((entry & ARCH_VM_PTE_RO) ? 0 : 1);
#endif
}

/*===========================================================================*
 *				pt_writemap				     *
 *===========================================================================*/
int pt_writemap(struct vmproc * vmp,
	pt_t *pt,
	vir_bytes v,
	phys_bytes physaddr,
	size_t bytes,
	u32_t flags,
	u32_t writemapflags)
{
/* Write mapping into page table. Allocate a new page table if necessary. */
/* Page directory and table entries for this virtual address. */
	int p, pages;
	int verify = 0;
	int ret = OK;

#ifdef CONFIG_SMP
	int vminhibit_clear = 0;
	/* FIXME
	 * don't do it everytime, stop the process only on the first change and
	 * resume the execution on the last change. Do in a wrapper of this
	 * function
	 */
	if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING)) {
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
		vminhibit_clear = 1;
	}
#endif

	if(writemapflags & WMF_VERIFY)
		verify = 1;

	assert(!(bytes % VM_PAGE_SIZE));
	assert(!(flags & ~(PTF_ALLFLAGS)));

	pages = bytes / VM_PAGE_SIZE;

	/* MAP_NONE means to clear the mapping. It doesn't matter
	 * what's actually written into the PTE if PRESENT
	 * isn't on, so we can just write MAP_NONE into it.
	 */
	assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT));
	assert(physaddr != MAP_NONE || !flags);

	/* First make sure all the necessary page tables are allocated,
	 * before we start writing in any of them, because it's a pain
	 * to undo our work properly.
	 */
	ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify);
	if(ret != OK) {
		printf("VM: writemap: pt_ptalloc_in_range failed\n");
		goto resume_exit;
	}

	/* Now write in them. */
	for(p = 0; p < pages; p++) {
		u32_t entry;
		int pde = ARCH_VM_PDE(v);
		int pte = ARCH_VM_PTE(v);

		assert(!(v % VM_PAGE_SIZE));
		assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);

		/* We do not expect it to be a bigpage. */
		assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert(pt->pt_pt[pde]);

#if SANITYCHECKS
		/* We don't expect to overwrite a page. */
		if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
			assert(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT));
#endif
		if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
#if defined(__i386__)
			physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
			physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
#endif
		}

		if(writemapflags & WMF_FREE) {
			free_mem(ABS2CLICK(physaddr), 1);
		}

		/* Entry we will write. */
#if defined(__i386__)
		entry = (physaddr & ARCH_VM_ADDR_MASK) | flags;
#elif defined(__arm__)
		entry = (physaddr & ARM_VM_PTE_MASK) | flags;
#endif

		if(verify) {
			u32_t maskedentry;
			maskedentry = pt->pt_pt[pde][pte];
#if defined(__i386__)
			maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
#endif
			/* Verify pagetable entry. */
#if defined(__i386__)
			if(entry & ARCH_VM_PTE_RW) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry |= ARCH_VM_PTE_RW;
			}
#elif defined(__arm__)
			if(!(entry & ARCH_VM_PTE_RO)) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry &= ~ARCH_VM_PTE_RO;
			}
			maskedentry &= ~(ARM_VM_PTE_WB|ARM_VM_PTE_WT);
#endif
			if(maskedentry != entry) {
				printf("pt_writemap: mismatch: ");
#if defined(__i386__)
				if((entry & ARCH_VM_ADDR_MASK) !=
					(maskedentry & ARCH_VM_ADDR_MASK)) {
#elif defined(__arm__)
				if((entry & ARM_VM_PTE_MASK) !=
					(maskedentry & ARM_VM_PTE_MASK)) {
#endif
					printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
						(long)entry, (long)maskedentry);
				} else printf("phys ok; ");
				printf(" flags: found %s; ",
					ptestr(pt->pt_pt[pde][pte]));
				printf(" masked %s; ",
					ptestr(maskedentry));
				printf(" expected %s\n", ptestr(entry));
				printf("found 0x%x, wanted 0x%x\n",
					pt->pt_pt[pde][pte], entry);
				ret = EFAULT;
				goto resume_exit;
			}
		} else {
			/* Write pagetable entry. */
			pt->pt_pt[pde][pte] = entry;
		}

		physaddr += VM_PAGE_SIZE;
		v += VM_PAGE_SIZE;
	}

resume_exit:

#ifdef CONFIG_SMP
	if (vminhibit_clear) {
		assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING));
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
	}
#endif

	return ret;
}

/*===========================================================================*
 *				pt_checkrange				     *
 *===========================================================================*/
int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes,
	int write)
{
	int p, pages;

	assert(!(bytes % VM_PAGE_SIZE));

	pages = bytes / VM_PAGE_SIZE;

	for(p = 0; p < pages; p++) {
		int pde = ARCH_VM_PDE(v);
		int pte = ARCH_VM_PTE(v);

		assert(!(v % VM_PAGE_SIZE));
		assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT))
			return EFAULT;

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]);

		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			return EFAULT;
		}

#if defined(__i386__)
		if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
#elif defined(__arm__)
		if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
#endif
			return EFAULT;
		}

		v += VM_PAGE_SIZE;
	}

	return OK;
}

/*===========================================================================*
 *				pt_new					     *
 *===========================================================================*/
int pt_new(pt_t *pt)
{
/* Allocate a pagetable root. Allocate a page-aligned page directory
 * and set them to 0 (indicating no page tables are allocated). Lookup
 * its physical address as we'll need that in the future. Verify it's
 * page-aligned.
 */
	int i, r;

	/* Don't ever re-allocate/re-move a certain process slot's
	 * page directory once it's been created. This is a fraction
	 * faster, but also avoids having to invalidate the page
	 * mappings from in-kernel page tables pointing to
	 * the page directories (the page_directories data).
	 */
	if(!pt->pt_dir &&
	   !(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys,
		VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) {
		return ENOMEM;
	}

	assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
		pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */
		pt->pt_pt[i] = NULL;
	}

	/* Where to start looking for free virtual address space? */
	pt->pt_virtop = 0;

	/* Map in kernel. */
	if((r=pt_mapkernel(pt)) != OK)
		return r;

	return OK;
}

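/* Hand out the next page directory entry that the kernel left free at boot
 * time (kernel_boot_info.freepde_start).
 */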
static int freepde(void)
{
	int p = kernel_boot_info.freepde_start++;
	assert(kernel_boot_info.freepde_start < ARCH_VM_DIR_ENTRIES);
	return p;
}

/*===========================================================================*
 *				pt_init					     *
 *===========================================================================*/
void pt_init(void)
{
	pt_t *newpt;
	int s, r, p;
	vir_bytes sparepages_mem;
#if defined(__arm__)
	vir_bytes sparepagedirs_mem;
#endif
	static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
	int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
	int global_bit_ok = 0;
	u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
	u32_t myttbr;
#endif

	/* Find what the physical location of the kernel is. */
	assert(m >= 0);
	assert(m < kernel_boot_info.mods_with_kernel);
	assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
	kern_mb_mod = &kernel_boot_info.module_list[m];
	kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
	assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
	assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
	kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

	/* Get ourselves spare pages. */
	sparepages_mem = (vir_bytes) static_sparepages;
	assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
	/* Get ourselves spare pagedirs. */
	sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
	assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

	/* Spare pages are used to allocate memory before VM has its own page
	 * table that things (i.e. arbitrary physical memory) can be mapped into.
	 * We get it by pre-allocating it in our bss (allocated and mapped in by
	 * the kernel) in static_sparepages. We also need the physical addresses
	 * though; we look them up now so they are ready for use.
	 */
#if defined(__arm__)
	missing_sparedirs = 0;
	assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			ARCH_PAGEDIR_SIZE, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		if(s >= STATIC_SPAREPAGEDIRS) {
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			continue;
		}
		sparepagedirs[s].pagedir = (void *) v;
		sparepagedirs[s].phys = ph;
	}
#endif

	if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
		panic("reservedqueue_new for single pages failed");

	assert(STATIC_SPAREPAGES < SPAREPAGES);
	for(s = 0; s < STATIC_SPAREPAGES; s++) {
		void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		reservedqueue_add(spare_pagequeue, v, ph);
	}

#if defined(__i386__)
	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;
#endif

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, pindex = 0;
		u32_t offset = 0;

		kernmap_pde = freepde();
		offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(pindex, &addr, &len,
			&flags) == OK) {
			int usedpde;
			vir_bytes vir;
			if(pindex >= MAX_KERNMAPPINGS)
				panic("VM: too many kernel mappings: %d", pindex);
			kern_mappings[pindex].phys_addr = addr;
			kern_mappings[pindex].len = len;
			kern_mappings[pindex].flags = flags;
			kern_mappings[pindex].vir_addr = offset;
			kern_mappings[pindex].flags =
				ARCH_VM_PTE_PRESENT;
			if(flags & VMMF_UNCACHED)
#if defined(__i386__)
				kern_mappings[pindex].flags |= PTF_NOCACHE;
#elif defined(__arm__)
				kern_mappings[pindex].flags |= ARM_VM_PTE_DEVICE;
			else {
				kern_mappings[pindex].flags |= ARM_VM_PTE_CACHED;
			}
#endif
			if(flags & VMMF_USER)
				kern_mappings[pindex].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
			else
				kern_mappings[pindex].flags |= ARM_VM_PTE_SUPER;
#endif
			if(flags & VMMF_WRITE)
				kern_mappings[pindex].flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
			else
				kern_mappings[pindex].flags |= ARCH_VM_PTE_RO;
#endif

#if defined(__i386__)
			if(flags & VMMF_GLO)
				kern_mappings[pindex].flags |= I386_VM_GLOBAL;
#endif

			if(addr % VM_PAGE_SIZE)
				panic("VM: addr unaligned: %lu", addr);
			if(len % VM_PAGE_SIZE)
				panic("VM: len unaligned: %lu", len);
			vir = offset;
			if(sys_vmctl_reply_mapping(pindex, vir) != OK)
				panic("VM: reply failed");
			offset += len;
			pindex++;
			kernmappings++;

			usedpde = ARCH_VM_PDE(offset);
			while(usedpde > kernmap_pde) {
				int newpde = freepde();
				assert(newpde == kernmap_pde+1);
				kernmap_pde = newpde;
			}
		}
	}

	/* Reserve PDEs available for mapping in the page directories. */
	{
		int pd;
		for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
			struct pdm *pdm = &pagedir_mappings[pd];
			pdm->pdeno = freepde();
			phys_bytes ph;

			/* Allocate us a page table in which to
			 * remember page directory pointers.
			 */
			if(!(pdm->page_directories =
				vm_allocpage(&ph, VMP_PAGETABLE))) {
				panic("no virt addr for vm mappings");
			}
			memset(pdm->page_directories, 0, VM_PAGE_SIZE);
			pdm->phys = ph;

#if defined(__i386__)
			pdm->val = (ph & ARCH_VM_ADDR_MASK) |
				ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
			pdm->val = (ph & ARCH_VM_PDE_MASK)
				| ARCH_VM_PDE_PRESENT
				| ARM_VM_PTE_CACHED
				| ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
		}
	}

	/* Allright. Now. We have to make our own page directory and page tables,
	 * that the kernel has already set up, accessible to us. It's easier to
	 * understand if we just copy all the required pages (i.e. page directory
	 * and page tables), and set up the pointers as if VM had done it itself.
	 *
	 * This allocation will happen without using any page table, and just
	 * uses spare pages.
	 */
	newpt = &vmprocess->vm_pt;
	if(pt_new(newpt) != OK)
		panic("vm pt_new failed");

	/* Get our current pagedir so we can see it. */
#if defined(__i386__)
	if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
	if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
		panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
	if(sys_vircopy(NONE, mypdbr, SELF,
		(vir_bytes) currentpagedir, VM_PAGE_SIZE, 0) != OK)
#elif defined(__arm__)
	if(sys_vircopy(NONE, myttbr, SELF,
		(vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE, 0) != OK)
#endif
		panic("VM: sys_vircopy failed");

	/* We have mapped in kernel ourselves; now copy mappings for VM
	 * that kernel made, including allocations for BSS. Skip identity
	 * mapping bits; just map in VM.
	 */
	for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
		u32_t entry = currentpagedir[p];
		phys_bytes ptaddr_kern, ptaddr_us;

		/* BIGPAGEs are kernel mapping (do ourselves) or boot
		 * identity mapping (don't want).
		 */
		if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
		if((entry & ARCH_VM_BIGPAGE)) continue;

		if(pt_ptalloc(newpt, p, 0) != OK)
			panic("pt_ptalloc failed");
		assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
		ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		ptaddr_kern = entry & ARCH_VM_PDE_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

		/* Copy kernel-initialized pagetable contents into our
		 * normally accessible pagetable.
		 */
		if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
			panic("pt_init: abscopy failed");
	}

	/* Inform kernel vm has a newly built page table. */
	assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
	pt_bind(newpt, &vmproc[VM_PROC_NR]);

	pt_init_done = 1;

	/* All OK. */
	return;
}

/*===========================================================================*
 *				pt_bind					     *
 *===========================================================================*/
int pt_bind(pt_t *pt, struct vmproc *who)
{
	int procslot, pdeslot;
	u32_t phys;
	void *pdes;
	int pagedir_pde;
	int slots_per_pde;
	int pages_per_pagedir = ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE;
	struct pdm *pdm;

	slots_per_pde = ARCH_VM_PT_ENTRIES / pages_per_pagedir;

	/* Basic sanity checks. */
	assert(who);
	assert(who->vm_flags & VMF_INUSE);
	assert(pt);

	procslot = who->vm_slot;
	pdm = &pagedir_mappings[procslot/slots_per_pde];
	pdeslot = procslot%slots_per_pde;
	pagedir_pde = pdm->pdeno;
	assert(pdeslot >= 0);
	assert(procslot < ELEMENTS(vmproc));
	assert(pdeslot < ARCH_VM_PT_ENTRIES / pages_per_pagedir);
	assert(pagedir_pde >= 0);

#if defined(__i386__)
	phys = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
	phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
#endif
	assert(pt->pt_dir_phys == phys);
	assert(!(pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	/* Update "page directory pagetable." */
#if defined(__i386__)
	pdm->page_directories[pdeslot] =
		phys | ARCH_VM_PDE_PRESENT|ARCH_VM_PTE_RW;
#elif defined(__arm__)
	{
		int i;
		for (i = 0; i < pages_per_pagedir; i++) {
			pdm->page_directories[pdeslot*pages_per_pagedir+i] =
				(phys+i*VM_PAGE_SIZE)
				| ARCH_VM_PTE_PRESENT
				| ARCH_VM_PTE_RW
				| ARM_VM_PTE_CACHED
				| ARCH_VM_PTE_USER; //LSC FIXME
		}
	}
#endif

	/* This is where the PDE's will be visible to the kernel
	 * in its address space.
	 */
	pdes = (void *) (pagedir_pde*ARCH_BIG_PAGE_SIZE +
#if defined(__i386__)
		pdeslot * VM_PAGE_SIZE);
#elif defined(__arm__)
		pdeslot * ARCH_PAGEDIR_SIZE);
#endif

	/* Tell kernel about new page table root. */
	return sys_vmctl_set_addrspace(who->vm_endpoint, pt->pt_dir_phys, pdes);
}

/*===========================================================================*
 *				pt_free					     *
 *===========================================================================*/
void pt_free(pt_t *pt)
{
/* Free memory associated with this pagetable. */
	int i;

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++)
		if(pt->pt_pt[i])
			vm_freepages((vir_bytes) pt->pt_pt[i], 1);

	return;
}

/*===========================================================================*
 *				pt_mapkernel				     *
 *===========================================================================*/
int pt_mapkernel(pt_t *pt)
{
	int i;
	int kern_pde = kern_start_pde;
	phys_bytes addr, mapped = 0;

	/* Any page table needs to map in the kernel address space. */
	assert(bigpage_ok);
	assert(kern_pde >= 0);

	/* pt_init() has made sure this is ok. */
	addr = kern_mb_mod->mod_start;

	/* Actually mapping in kernel */
	while(mapped < kern_size) {
#if defined(__i386__)
		pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
			ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
#elif defined(__arm__)
		pt->pt_dir[kern_pde] = (addr & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED
			| ARM_VM_SECTION_SUPER;
#endif
		kern_pde++;
		mapped += ARCH_BIG_PAGE_SIZE;
		addr += ARCH_BIG_PAGE_SIZE;
	}

	/* Kernel also wants to know about all page directories. */
	{
		int pd;
		for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
			struct pdm *pdm = &pagedir_mappings[pd];

			assert(pdm->pdeno > 0);
			assert(pdm->pdeno > kern_pde);
			pt->pt_dir[pdm->pdeno] = pdm->val;
		}
	}

	/* Kernel also wants various mappings of its own. */
	for(i = 0; i < kernmappings; i++) {
		int r;
		if((r=pt_writemap(NULL, pt,
			kern_mappings[i].vir_addr,
			kern_mappings[i].phys_addr,
			kern_mappings[i].len,
			kern_mappings[i].flags, 0)) != OK) {
			return r;
		}
	}

	return OK;
}

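/* Return the number of pages VM has allocated and mapped for its own use. */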
int get_vm_self_pages(void) { return vm_self_pages; }