/* minix/servers/vm/pagetable.c */
#define _SYSTEM 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/cpufeature.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <fcntl.h>

#include "proto.h"
#include "glo.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"
static int vm_self_pages;

/* PDE used to map in kernel, kernel physical address. */
#define MAX_PAGEDIR_PDES 5
static struct pdm {
	int pdeno;
	u32_t val;
	phys_bytes phys;
	u32_t *page_directories;
} pagedir_mappings[MAX_PAGEDIR_PDES];

static multiboot_module_t *kern_mb_mod = NULL;
static size_t kern_size = 0;
static int kern_start_pde = -1;

/* big page size available in hardware? */
static int bigpage_ok = 1;

/* Our process table entry. */
struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
/* Spare memory, ready to go after initialization, to avoid a
 * circular dependency on allocating memory and writing it into VM's
 * page table.
 */
#if SANITYCHECKS
#define SPAREPAGES 200
#define STATIC_SPAREPAGES 190
#else
#ifdef __arm__
# define SPAREPAGES 150
# define STATIC_SPAREPAGES 140
#else
# define SPAREPAGES 20
# define STATIC_SPAREPAGES 15
#endif /* __arm__ */
#endif
#ifdef __i386__
static u32_t global_bit = 0;
#endif

#define SPAREPAGEDIRS 1
#define STATIC_SPAREPAGEDIRS 1

int missing_sparedirs = SPAREPAGEDIRS;
static struct {
	void *pagedir;
	phys_bytes phys;
} sparepagedirs[SPAREPAGEDIRS];

#define is_staticaddr(v) ((vir_bytes) (v) < VM_OWN_HEAPSTART)

#define MAX_KERNMAPPINGS 10
static struct {
	phys_bytes phys_addr;	/* Physical addr. */
	phys_bytes len;		/* Length in bytes. */
	vir_bytes vir_addr;	/* Offset in page table. */
	int flags;
} kern_mappings[MAX_KERNMAPPINGS];
int kernmappings = 0;
/* Clicks must be pages, as
 *  - they must be page aligned to map them
 *  - they must be a multiple of the page size
 *  - it's inconvenient to have them bigger than pages, because we often want
 *    just one page
 * May as well require them to be equal then.
 */
#if CLICK_SIZE != VM_PAGE_SIZE
#error CLICK_SIZE must be page size.
#endif
static void *spare_pagequeue;
static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES]
	__aligned(VM_PAGE_SIZE);

#if defined(__arm__)
static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE);
#endif
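/*===========================================================================*
 *				pt_assert				     *
 *===========================================================================*/
/* Consistency check: the page directory read back from physical memory
 * (via sys_physcopy) must match VM's own in-memory copy in pt->pt_dir.
 */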
void pt_assert(pt_t *pt)
{
	char dir[4096];
	pt_clearmapcache();
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
	sys_physcopy(NONE, pt->pt_dir_phys, SELF, (vir_bytes) dir, sizeof(dir), 0);
	assert(!memcmp(dir, pt->pt_dir, sizeof(dir)));
}
#if SANITYCHECKS
/*===========================================================================*
 *				pt_sanitycheck				     *
 *===========================================================================*/
void pt_sanitycheck(pt_t *pt, const char *file, int line)
{
/* Basic pt sanity check. */
	int slot;

	MYASSERT(pt);
	MYASSERT(pt->pt_dir);
	MYASSERT(pt->pt_dir_phys);

	for(slot = 0; slot < ELEMENTS(vmproc); slot++) {
		if(pt == &vmproc[slot].vm_pt)
			break;
	}

	if(slot >= ELEMENTS(vmproc)) {
		panic("pt_sanitycheck: passed pt not in any proc");
	}

	MYASSERT(usedpages_add(pt->pt_dir_phys, VM_PAGE_SIZE) == OK);
}
#endif
/*===========================================================================*
 *				findhole				     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
/* Find a space in the virtual address space of VM. */
	u32_t curv;
	int pde = 0, try_restart;
	static void *lastv = 0;
	pt_t *pt = &vmprocess->vm_pt;
	vir_bytes vmin, vmax;
	u32_t holev = NO_MEM;
	int holesize = -1;

	vmin = VM_OWN_MMAPBASE;
	vmax = VM_OWN_MMAPTOP;

	/* Input sanity check. */
	assert(vmin + VM_PAGE_SIZE >= vmin);
	assert(vmax >= vmin + VM_PAGE_SIZE);
	assert((vmin % VM_PAGE_SIZE) == 0);
	assert((vmax % VM_PAGE_SIZE) == 0);
	assert(pages > 0);

	curv = (u32_t) lastv;
	if(curv < vmin || curv >= vmax)
		curv = vmin;

	try_restart = 1;

	/* Start looking for a free page starting at vmin. */
	while(curv < vmax) {
		int pte;

		assert(curv >= vmin);
		assert(curv < vmax);

		pde = ARCH_VM_PDE(curv);
		pte = ARCH_VM_PTE(curv);

		if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
		   (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			/* there is a page here - so keep looking for holes */
			holev = NO_MEM;
			holesize = 0;
		} else {
			/* there is no page here - so we have a hole, a bigger
			 * one if we already had one
			 */
			if(holev == NO_MEM) {
				holev = curv;
				holesize = 1;
			} else holesize++;

			assert(holesize > 0);
			assert(holesize <= pages);

			/* if it's big enough, return it */
			if(holesize == pages) {
				lastv = (void*) (curv + VM_PAGE_SIZE);
				return holev;
			}
		}

		curv += VM_PAGE_SIZE;

		/* if we reached the limit, start scanning from the beginning if
		 * we haven't looked there yet
		 */
		if(curv >= vmax && try_restart) {
			try_restart = 0;
			curv = vmin;
		}
	}

	printf("VM: out of virtual address space in vm\n");

	return NO_MEM;
}
/*===========================================================================*
 *				vm_freepages				     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
	assert(!(vir % VM_PAGE_SIZE));

	if(is_staticaddr(vir)) {
		printf("VM: not freeing static page\n");
		return;
	}

	if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
		MAP_NONE, pages*VM_PAGE_SIZE, 0,
		WMF_OVERWRITE | WMF_FREE) != OK)
		panic("vm_freepages: pt_writemap failed");

	vm_self_pages--;

#if SANITYCHECKS
	/* If SANITYCHECKS are on, flush tlb so accessing freed pages is
	 * always trapped, also if not in tlb.
	 */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
#endif
}
/*===========================================================================*
 *				vm_getsparepage				     *
 *===========================================================================*/
static void *vm_getsparepage(phys_bytes *phys)
{
	void *ptr;
	if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) {
		return NULL;
	}
	assert(ptr);
	return ptr;
}
/*===========================================================================*
 *				vm_getsparepagedir			     *
 *===========================================================================*/
static void *vm_getsparepagedir(phys_bytes *phys)
{
	int s;
	assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		if(sparepagedirs[s].pagedir) {
			void *sp;
			sp = sparepagedirs[s].pagedir;
			*phys = sparepagedirs[s].phys;
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
			return sp;
		}
	}
	return NULL;
}
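/*===========================================================================*
 *				vm_mappages				     *
 *===========================================================================*/
/* Map 'pages' pages of physical memory starting at 'p' into a hole in VM's
 * own virtual address space (found with findhole()) and return the chosen
 * virtual address, or NULL on failure.
 */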
void *vm_mappages(phys_bytes p, int pages)
{
	vir_bytes loc;
	int r;
	pt_t *pt = &vmprocess->vm_pt;

	/* Where in our virtual address space can we put it? */
	loc = findhole(pages);
	if(loc == NO_MEM) {
		printf("vm_mappages: findhole failed\n");
		return NULL;
	}

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
		| ARM_VM_PTE_CACHED
#endif
		, 0)) != OK) {
		printf("vm_mappages writemap failed\n");
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	assert(loc);

	return (void *) loc;
}
static int pt_init_done;

/*===========================================================================*
 *				vm_allocpage				     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	static int level = 0;
	void *ret;
	u32_t mem_flags = 0;

	assert(reason >= 0 && reason < VMP_CATEGORIES);

	assert(pages > 0);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if((level > 1) || !pt_init_done) {
		void *s;

		if(pages == 1) s = vm_getsparepage(phys);
		else if(pages == 4) s = vm_getsparepagedir(phys);
		else panic("%d pages", pages);

		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		if(!is_staticaddr(s)) vm_self_pages++;
		return s;
	}

#if defined(__arm__)
	if (reason == VMP_PAGEDIR) {
		mem_flags |= PAF_ALIGN16K;
	}
#endif

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	if(!(ret = vm_mappages(*phys, pages))) {
		level--;
		printf("VM: vm_allocpage: vm_mappages failed\n");
		return NULL;
	}

	level--;
	vm_self_pages++;

	return ret;
}

void *vm_allocpage(phys_bytes *phys, int reason)
{
	return vm_allocpages(phys, reason, 1);
}
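/* Typical use, as in pt_ptalloc() below: the caller gets back both a
 * virtual address mapped into VM's own address space and the page's
 * physical address. A minimal sketch:
 *
 *	phys_bytes pt_phys;
 *	u32_t *p = vm_allocpage(&pt_phys, VMP_PAGETABLE);
 *	if(!p) return ENOMEM;
 */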
/*===========================================================================*
 *				vm_pagelock				     *
 *===========================================================================*/
void vm_pagelock(void *vir, int lockflag)
{
/* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
	vir_bytes m = (vir_bytes) vir;
	int r;
	u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER;
	pt_t *pt;

	pt = &vmprocess->vm_pt;

	assert(!(m % VM_PAGE_SIZE));

	if(!lockflag)
		flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
	else
		flags |= ARCH_VM_PTE_RO;

	flags |= ARM_VM_PTE_CACHED;
#endif

	/* Update flags. */
	if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE,
		flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
		panic("vm_lockpage: pt_writemap failed");
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	return;
}
/*===========================================================================*
 *				vm_addrok				     *
 *===========================================================================*/
int vm_addrok(void *vir, int writeflag)
{
	pt_t *pt = &vmprocess->vm_pt;
	int pde, pte;
	vir_bytes v = (vir_bytes) vir;

	pde = ARCH_VM_PDE(v);
	pte = ARCH_VM_PTE(v);

	if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
		printf("addr not ok: missing pde %d\n", pde);
		return 0;
	}

#if defined(__i386__)
	if(writeflag &&
		!(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) {
		printf("addr not ok: pde %d present but pde unwritable\n", pde);
		return 0;
	}
#elif defined(__arm__)
	if(writeflag &&
		(pt->pt_dir[pde] & ARCH_VM_PTE_RO)) {
		printf("addr not ok: pde %d present but pde unwritable\n", pde);
		return 0;
	}
#endif
	if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
		printf("addr not ok: missing pde %d / pte %d\n",
			pde, pte);
		return 0;
	}

#if defined(__i386__)
	if(writeflag &&
		!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
			pde, pte);
#elif defined(__arm__)
	if(writeflag &&
		(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
			pde, pte);
#endif
		return 0;
	}

	return 1;
}
/*===========================================================================*
 *				pt_ptalloc				     *
 *===========================================================================*/
static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
{
/* Allocate a page table and write its address into the page directory. */
	int i;
	phys_bytes pt_phys;
	u32_t *p;

	/* Argument must make sense. */
	assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);
	assert(!(flags & ~(PTF_ALLFLAGS)));

	/* We don't expect to overwrite page directory entry, nor
	 * storage for the page table.
	 */
	assert(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT));
	assert(!pt->pt_pt[pde]);

	/* Get storage for the page table. The allocation call may in fact
	 * recursively create the directory entry as a side effect. In that
	 * case, we free the newly allocated page and do nothing else.
	 */
	if (!(p = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
		return ENOMEM;
	if (pt->pt_pt[pde]) {
		vm_freepages((vir_bytes) p, 1);
		assert(pt->pt_pt[pde]);
		return OK;
	}
	pt->pt_pt[pde] = p;

	for(i = 0; i < ARCH_VM_PT_ENTRIES; i++)
		pt->pt_pt[pde][i] = 0;	/* Empty entry. */

	/* Make page directory entry.
	 * The PDE is always 'present,' 'writable,' and 'user accessible,'
	 * relying on the PTE for protection.
	 */
#if defined(__i386__)
	pt->pt_dir[pde] = (pt_phys & ARCH_VM_ADDR_MASK) | flags
		| ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
#elif defined(__arm__)
	pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
		| ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif

	return OK;
}
/*===========================================================================*
 *				pt_ptalloc_in_range			     *
 *===========================================================================*/
int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
	u32_t flags, int verify)
{
/* Allocate all the page tables in the range specified. */
	int pde, first_pde, last_pde;

	first_pde = ARCH_VM_PDE(start);
	last_pde = ARCH_VM_PDE(end-1);

	assert(first_pde >= 0);
	assert(last_pde < ARCH_VM_DIR_ENTRIES);

	/* Scan all page-directory entries in the range. */
	for(pde = first_pde; pde <= last_pde; pde++) {
		assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			int r;
			if(verify) {
				printf("pt_ptalloc_in_range: no pde %d\n", pde);
				return EFAULT;
			}
			assert(!pt->pt_dir[pde]);
			if((r=pt_ptalloc(pt, pde, flags)) != OK) {
				/* Couldn't do (complete) mapping.
				 * Don't bother freeing any previously
				 * allocated page tables, they're
				 * still writable, don't point to nonsense,
				 * and pt_ptalloc leaves the directory
				 * and other data in a consistent state.
				 */
				return r;
			}
			assert(pt->pt_pt[pde]);
		}
		assert(pt->pt_pt[pde]);
		assert(pt->pt_dir[pde]);
		assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
	}

	return OK;
}
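/* ptestr() - build a human-readable string of the flags set in a page
 * table entry, used for the debug output in pt_writemap()'s verify path.
 */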
static const char *ptestr(u32_t pte)
{
#define FLAG(constant, name) {						\
	if(pte & (constant)) { strcat(str, name); strcat(str, " "); }	\
}

	static char str[30];
	if(!(pte & ARCH_VM_PTE_PRESENT)) {
		return "not present";
	}
	str[0] = '\0';
#if defined(__i386__)
	FLAG(ARCH_VM_PTE_RW, "W");
#elif defined(__arm__)
	if(pte & ARCH_VM_PTE_RO) {
		strcat(str, "R ");
	} else {
		strcat(str, "W ");
	}
#endif
	FLAG(ARCH_VM_PTE_USER, "U");
#if defined(__i386__)
	FLAG(I386_VM_PWT, "PWT");
	FLAG(I386_VM_PCD, "PCD");
	FLAG(I386_VM_ACC, "ACC");
	FLAG(I386_VM_DIRTY, "DIRTY");
	FLAG(I386_VM_PS, "PS");
	FLAG(I386_VM_GLOBAL, "G");
	FLAG(I386_VM_PTAVAIL1, "AV1");
	FLAG(I386_VM_PTAVAIL2, "AV2");
	FLAG(I386_VM_PTAVAIL3, "AV3");
#elif defined(__arm__)
	FLAG(ARM_VM_PTE_SUPER, "S");
	FLAG(ARM_VM_PTE_S, "SH");
	FLAG(ARM_VM_PTE_WB, "WB");
	FLAG(ARM_VM_PTE_WT, "WT");
#endif

	return str;
}
/*===========================================================================*
 *				pt_map_in_range				     *
 *===========================================================================*/
int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
	vir_bytes start, vir_bytes end)
{
/* Transfer all the mappings from the pt of the source process to the pt of
 * the destination process in the range specified.
 */
	int pde, pte;
	vir_bytes viraddr;
	pt_t *pt, *dst_pt;

	pt = &src_vmp->vm_pt;
	dst_pt = &dst_vmp->vm_pt;

	end = end ? end : VM_DATATOP;
	assert(start % VM_PAGE_SIZE == 0);
	assert(end % VM_PAGE_SIZE == 0);

	assert( /* ARCH_VM_PDE(start) >= 0 && */ start <= end);
	assert(ARCH_VM_PDE(end) < ARCH_VM_DIR_ENTRIES);

#if LU_DEBUG
	printf("VM: pt_map_in_range: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
	printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
		start, ARCH_VM_PDE(start), ARCH_VM_PTE(start),
		end, ARCH_VM_PDE(end), ARCH_VM_PTE(end));
#endif

	/* Scan all page-table entries in the range. */
	for(viraddr = start; viraddr <= end; viraddr += VM_PAGE_SIZE) {
		pde = ARCH_VM_PDE(viraddr);
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			if(viraddr == VM_DATATOP) break;
			continue;
		}
		pte = ARCH_VM_PTE(viraddr);
		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			if(viraddr == VM_DATATOP) break;
			continue;
		}

		/* Transfer the mapping. */
		dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];
		assert(dst_pt->pt_pt[pde]);

		if(viraddr == VM_DATATOP) break;
	}

	return OK;
}
/*===========================================================================*
 *				pt_ptmap				     *
 *===========================================================================*/
int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
/* Transfer mappings to page dir and page tables from source process and
 * destination process.
 */
	int pde, r;
	phys_bytes physaddr;
	vir_bytes viraddr;
	pt_t *pt;

	pt = &src_vmp->vm_pt;

#if LU_DEBUG
	printf("VM: pt_ptmap: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif

	/* Transfer mapping to the page directory. */
	viraddr = (vir_bytes) pt->pt_dir;
	physaddr = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#if defined(__i386__)
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
#elif defined(__arm__)
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER |
		ARM_VM_PTE_CACHED,
#endif
		WMF_OVERWRITE)) != OK) {
		return r;
	}
#if LU_DEBUG
	printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
		viraddr, physaddr);
#endif

	/* Scan all non-reserved page-directory entries. */
	for(pde = 0; pde < kern_start_pde; pde++) {
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			continue;
		}

		if(!pt->pt_pt[pde]) { panic("pde %d empty\n", pde); }

		/* Transfer mapping to the page table. */
		viraddr = (vir_bytes) pt->pt_pt[pde];
#if defined(__i386__)
		physaddr = pt->pt_dir[pde] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK;
#endif
		assert(viraddr);
		if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
			ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#ifdef __arm__
			| ARM_VM_PTE_CACHED
#endif
			,
			WMF_OVERWRITE)) != OK) {
			return r;
		}
	}

	return OK;
}
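/*===========================================================================*
 *				pt_clearmapcache			     *
 *===========================================================================*/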
void pt_clearmapcache(void)
{
/* Make sure kernel will invalidate tlb when using current
 * pagetable (i.e. vm's) to make new mappings before new cr3
 * is loaded.
 */
	if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK)
		panic("VMCTL_CLEARMAPCACHE failed");
}
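/*===========================================================================*
 *				pt_writable				     *
 *===========================================================================*/
/* Return nonzero iff the page at virtual address 'v' is mapped writable in
 * the given process's page table; the page is expected to be mapped in.
 */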
int pt_writable(struct vmproc *vmp, vir_bytes v)
{
	u32_t entry;
	pt_t *pt = &vmp->vm_pt;
	assert(!(v % VM_PAGE_SIZE));
	int pde = ARCH_VM_PDE(v);
	int pte = ARCH_VM_PTE(v);

	assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
	assert(pt->pt_pt[pde]);

	entry = pt->pt_pt[pde][pte];

#if defined(__i386__)
	return((entry & PTF_WRITE) ? 1 : 0);
#elif defined(__arm__)
	return((entry & ARCH_VM_PTE_RO) ? 0 : 1);
#endif
}
/*===========================================================================*
 *				pt_writemap				     *
 *===========================================================================*/
int pt_writemap(struct vmproc * vmp,
	pt_t *pt,
	vir_bytes v,
	phys_bytes physaddr,
	size_t bytes,
	u32_t flags,
	u32_t writemapflags)
{
/* Write mapping into page table. Allocate a new page table if necessary. */
/* Page directory and table entries for this virtual address. */
	int p, pages;
	int verify = 0;
	int ret = OK;

#ifdef CONFIG_SMP
	int vminhibit_clear = 0;
	/* FIXME
	 * don't do it everytime, stop the process only on the first change and
	 * resume the execution on the last change. Do in a wrapper of this
	 * function
	 */
	if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING)) {
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
		vminhibit_clear = 1;
	}
#endif

	if(writemapflags & WMF_VERIFY)
		verify = 1;

	assert(!(bytes % VM_PAGE_SIZE));
	assert(!(flags & ~(PTF_ALLFLAGS)));

	pages = bytes / VM_PAGE_SIZE;

	/* MAP_NONE means to clear the mapping. It doesn't matter
	 * what's actually written into the PTE if PRESENT
	 * isn't on, so we can just write MAP_NONE into it.
	 */
	assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT));
	assert(physaddr != MAP_NONE || !flags);

	/* First make sure all the necessary page tables are allocated,
	 * before we start writing in any of them, because it's a pain
	 * to undo our work properly.
	 */
	ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify);
	if(ret != OK) {
		printf("VM: writemap: pt_ptalloc_in_range failed\n");
		goto resume_exit;
	}

	/* Now write in them. */
	for(p = 0; p < pages; p++) {
		u32_t entry;
		int pde = ARCH_VM_PDE(v);
		int pte = ARCH_VM_PTE(v);

		assert(!(v % VM_PAGE_SIZE));
		assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);

		/* We do not expect it to be a bigpage. */
		assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert(pt->pt_pt[pde]);

		if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
#if defined(__i386__)
			physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
			physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
#endif
		}

		if(writemapflags & WMF_FREE) {
			free_mem(ABS2CLICK(physaddr), 1);
		}

		/* Entry we will write. */
#if defined(__i386__)
		entry = (physaddr & ARCH_VM_ADDR_MASK) | flags;
#elif defined(__arm__)
		entry = (physaddr & ARM_VM_PTE_MASK) | flags;
#endif

		if(verify) {
			u32_t maskedentry;
			maskedentry = pt->pt_pt[pde][pte];
#if defined(__i386__)
			maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
#endif
			/* Verify pagetable entry. */
#if defined(__i386__)
			if(entry & ARCH_VM_PTE_RW) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry |= ARCH_VM_PTE_RW;
			}
#elif defined(__arm__)
			if(!(entry & ARCH_VM_PTE_RO)) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry &= ~ARCH_VM_PTE_RO;
			}
			maskedentry &= ~(ARM_VM_PTE_WB|ARM_VM_PTE_WT);
#endif
			if(maskedentry != entry) {
				printf("pt_writemap: mismatch: ");
#if defined(__i386__)
				if((entry & ARCH_VM_ADDR_MASK) !=
					(maskedentry & ARCH_VM_ADDR_MASK)) {
#elif defined(__arm__)
				if((entry & ARM_VM_PTE_MASK) !=
					(maskedentry & ARM_VM_PTE_MASK)) {
#endif
					printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
						(long)entry, (long)maskedentry);
				} else printf("phys ok; ");
				printf(" flags: found %s; ",
					ptestr(pt->pt_pt[pde][pte]));
				printf(" masked %s; ",
					ptestr(maskedentry));
				printf(" expected %s\n", ptestr(entry));
				printf("found 0x%x, wanted 0x%x\n",
					pt->pt_pt[pde][pte], entry);
				ret = EFAULT;
				goto resume_exit;
			}
		} else {
			/* Write pagetable entry. */
			pt->pt_pt[pde][pte] = entry;
		}

		physaddr += VM_PAGE_SIZE;
		v += VM_PAGE_SIZE;
	}

resume_exit:

#ifdef CONFIG_SMP
	if (vminhibit_clear) {
		assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING));
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
	}
#endif

	return ret;
}
/*===========================================================================*
 *				pt_checkrange				     *
 *===========================================================================*/
int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes,
	int write)
{
	int p, pages;

	assert(!(bytes % VM_PAGE_SIZE));

	pages = bytes / VM_PAGE_SIZE;

	for(p = 0; p < pages; p++) {
		int pde = ARCH_VM_PDE(v);
		int pte = ARCH_VM_PTE(v);

		assert(!(v % VM_PAGE_SIZE));
		assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT))
			return EFAULT;

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]);

		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			return EFAULT;
		}

#if defined(__i386__)
		if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
#elif defined(__arm__)
		if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
#endif
			return EFAULT;
		}

		v += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				pt_new					     *
 *===========================================================================*/
int pt_new(pt_t *pt)
{
/* Allocate a pagetable root. Allocate a page-aligned page directory
 * and set them to 0 (indicating no page tables are allocated). Lookup
 * its physical address as we'll need that in the future. Verify it's
 * page-aligned.
 */
	int i, r;

	/* Don't ever re-allocate/re-move a certain process slot's
	 * page directory once it's been created. This is a fraction
	 * faster, but also avoids having to invalidate the page
	 * mappings from in-kernel page tables pointing to
	 * the page directories (the page_directories data).
	 */
	if(!pt->pt_dir &&
	  !(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys,
		VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) {
		return ENOMEM;
	}

	assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
		pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */
		pt->pt_pt[i] = NULL;
	}

	/* Where to start looking for free virtual address space? */
	pt->pt_virtop = 0;

	/* Map in kernel. */
	if((r=pt_mapkernel(pt)) != OK)
		return r;

	return OK;
}
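/* freepde() - hand out the next page directory entry number from the range
 * of PDEs the kernel reserved for VM at boot (kernel_boot_info.freepde_start).
 */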
static int freepde(void)
{
	int p = kernel_boot_info.freepde_start++;
	assert(kernel_boot_info.freepde_start < ARCH_VM_DIR_ENTRIES);
	return p;
}
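/*===========================================================================*
 *		pt_allocate_kernel_mapped_pagetables			     *
 *===========================================================================*/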
void pt_allocate_kernel_mapped_pagetables(void)
{
	/* Reserve PDEs available for mapping in the page directories. */
	int pd;
	for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
		struct pdm *pdm = &pagedir_mappings[pd];
		if(!pdm->pdeno) {
			pdm->pdeno = freepde();
			assert(pdm->pdeno);
		}
		phys_bytes ph;

		/* Allocate us a page table in which to
		 * remember page directory pointers.
		 */
		if(!(pdm->page_directories =
			vm_allocpage(&ph, VMP_PAGETABLE))) {
			panic("no virt addr for vm mappings");
		}
		memset(pdm->page_directories, 0, VM_PAGE_SIZE);
		pdm->phys = ph;

#if defined(__i386__)
		pdm->val = (ph & ARCH_VM_ADDR_MASK) |
			ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
		pdm->val = (ph & ARCH_VM_PDE_MASK)
			| ARCH_VM_PDE_PRESENT
			| ARM_VM_PTE_CACHED
			| ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
	}
}
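/* pt_copy() - allocate fresh page tables in 'dst' and copy all non-kernel
 * page table contents over from 'src'; used by pt_init() to rebuild VM's
 * own page table from dynamically allocated memory.
 */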
static void pt_copy(pt_t *dst, pt_t *src)
{
	int pde;
	for(pde = 0; pde < kern_start_pde; pde++) {
		if(!(src->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			continue;
		}
		assert(!(src->pt_dir[pde] & ARCH_VM_BIGPAGE));
		if(!src->pt_pt[pde]) { panic("pde %d empty\n", pde); }
		if(pt_ptalloc(dst, pde, 0) != OK)
			panic("pt_ptalloc failed");
		memcpy(dst->pt_pt[pde], src->pt_pt[pde],
			ARCH_VM_PT_ENTRIES * sizeof(*dst->pt_pt[pde]));
	}
}
/*===========================================================================*
 *				pt_init					     *
 *===========================================================================*/
void pt_init(void)
{
	pt_t *newpt, newpt_dyn;
	int s, r, p;
	phys_bytes phys;
	vir_bytes sparepages_mem;
#if defined(__arm__)
	vir_bytes sparepagedirs_mem;
#endif
	static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
	int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
	int global_bit_ok = 0;
	u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
	u32_t myttbr;
#endif

	/* Find what the physical location of the kernel is. */
	assert(m >= 0);
	assert(m < kernel_boot_info.mods_with_kernel);
	assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
	kern_mb_mod = &kernel_boot_info.module_list[m];
	kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
	assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
	assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
	kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

	/* Get ourselves spare pages. */
	sparepages_mem = (vir_bytes) static_sparepages;
	assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
	/* Get ourselves spare pagedirs. */
	sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
	assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

	/* Spare pages are used to allocate memory before VM has its own page
	 * table that things (i.e. arbitrary physical memory) can be mapped into.
	 * We get it by pre-allocating it in our bss (allocated and mapped in by
	 * the kernel) in static_sparepages. We also need the physical addresses
	 * though; we look them up now so they are ready for use.
	 */
#if defined(__arm__)
	missing_sparedirs = 0;
	assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			ARCH_PAGEDIR_SIZE, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		if(s >= STATIC_SPAREPAGEDIRS) {
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			continue;
		}
		sparepagedirs[s].pagedir = (void *) v;
		sparepagedirs[s].phys = ph;
	}
#endif

	if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
		panic("reservedqueue_new for single pages failed");

	assert(STATIC_SPAREPAGES < SPAREPAGES);
	for(s = 0; s < STATIC_SPAREPAGES; s++) {
		void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		reservedqueue_add(spare_pagequeue, v, ph);
	}

#if defined(__i386__)
	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;
#endif

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, pindex = 0;
		u32_t offset = 0;

		kernmap_pde = freepde();
		offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(pindex, &addr, &len,
			&flags) == OK) {
			int usedpde;
			vir_bytes vir;
			if(pindex >= MAX_KERNMAPPINGS)
				panic("VM: too many kernel mappings: %d", pindex);
			kern_mappings[pindex].phys_addr = addr;
			kern_mappings[pindex].len = len;
			kern_mappings[pindex].flags = flags;
			kern_mappings[pindex].vir_addr = offset;
			kern_mappings[pindex].flags =
				ARCH_VM_PTE_PRESENT;
			if(flags & VMMF_UNCACHED)
#if defined(__i386__)
				kern_mappings[pindex].flags |= PTF_NOCACHE;
#elif defined(__arm__)
				kern_mappings[pindex].flags |= ARM_VM_PTE_DEVICE;
			else {
				kern_mappings[pindex].flags |= ARM_VM_PTE_CACHED;
			}
#endif
			if(flags & VMMF_USER)
				kern_mappings[pindex].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
			else
				kern_mappings[pindex].flags |= ARM_VM_PTE_SUPER;
#endif
			if(flags & VMMF_WRITE)
				kern_mappings[pindex].flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
			else
				kern_mappings[pindex].flags |= ARCH_VM_PTE_RO;
#endif

#if defined(__i386__)
			if(flags & VMMF_GLO)
				kern_mappings[pindex].flags |= I386_VM_GLOBAL;
#endif

			if(addr % VM_PAGE_SIZE)
				panic("VM: addr unaligned: %lu", addr);
			if(len % VM_PAGE_SIZE)
				panic("VM: len unaligned: %lu", len);
			vir = offset;
			if(sys_vmctl_reply_mapping(pindex, vir) != OK)
				panic("VM: reply failed");
			offset += len;
			pindex++;
			kernmappings++;

			usedpde = ARCH_VM_PDE(offset);
			while(usedpde > kernmap_pde) {
				int newpde = freepde();
				assert(newpde == kernmap_pde+1);
				kernmap_pde = newpde;
			}
		}
	}

	pt_allocate_kernel_mapped_pagetables();

	/* Allright. Now. We have to make our own page directory and page tables,
	 * that the kernel has already set up, accessible to us. It's easier to
	 * understand if we just copy all the required pages (i.e. page directory
	 * and page tables), and set up the pointers as if VM had done it itself.
	 *
	 * This allocation will happen without using any page table, and just
	 * uses spare pages.
	 */
	newpt = &vmprocess->vm_pt;
	if(pt_new(newpt) != OK)
		panic("vm pt_new failed");

	/* Get our current pagedir so we can see it. */
#if defined(__i386__)
	if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
	if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
		panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
	if(sys_vircopy(NONE, mypdbr, SELF,
		(vir_bytes) currentpagedir, VM_PAGE_SIZE, 0) != OK)
#elif defined(__arm__)
	if(sys_vircopy(NONE, myttbr, SELF,
		(vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE, 0) != OK)
#endif
		panic("VM: sys_vircopy failed");

	/* We have mapped in kernel ourselves; now copy mappings for VM
	 * that kernel made, including allocations for BSS. Skip identity
	 * mapping bits; just map in VM.
	 */
	for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
		u32_t entry = currentpagedir[p];
		phys_bytes ptaddr_kern, ptaddr_us;

		/* BIGPAGEs are kernel mapping (do ourselves) or boot
		 * identity mapping (don't want).
		 */
		if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
		if((entry & ARCH_VM_BIGPAGE)) continue;

		if(pt_ptalloc(newpt, p, 0) != OK)
			panic("pt_ptalloc failed");
		assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
		ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		ptaddr_kern = entry & ARCH_VM_PDE_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

		/* Copy kernel-initialized pagetable contents into our
		 * normally accessible pagetable.
		 */
		if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
			panic("pt_init: abscopy failed");
	}

	/* Inform kernel vm has a newly built page table. */
	assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
	pt_bind(newpt, &vmproc[VM_PROC_NR]);

	pt_init_done = 1;

	/* VM is now fully functional in that it can dynamically allocate memory
	 * for itself.
	 *
	 * We don't want to keep using the bootstrap statically allocated spare
	 * pages though, as the physical addresses will change on liveupdate. So we
	 * re-do part of the initialization now with purely dynamically allocated
	 * memory. First throw out the static pool.
	 *
	 * Then allocate the kernel-shared-pagetables and VM pagetables with dynamic
	 * memory.
	 */

	alloc_cycle();                          /* Make sure allocating works */
	while(vm_getsparepage(&phys)) ;         /* Use up all static pages */
	alloc_cycle();                          /* Refill spares with dynamic */
	pt_allocate_kernel_mapped_pagetables(); /* Reallocate in-kernel pages */
	pt_bind(newpt, &vmproc[VM_PROC_NR]);    /* Recalculate */
	pt_mapkernel(newpt);                    /* Rewrite pagetable info */

	/* Flush TLB just in case any of those mappings have been touched */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}

	/* Recreate VM page table with dynamic-only allocations */
	memset(&newpt_dyn, 0, sizeof(newpt_dyn));
	pt_new(&newpt_dyn);
	pt_copy(&newpt_dyn, newpt);
	memcpy(newpt, &newpt_dyn, sizeof(*newpt));

	pt_bind(newpt, &vmproc[VM_PROC_NR]);    /* Recalculate */
	pt_mapkernel(newpt);                    /* Rewrite pagetable info */

	/* Flush TLB just in case any of those mappings have been touched */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}

	/* All OK. */
	return;
}
/*===========================================================================*
 *				pt_bind					     *
 *===========================================================================*/
int pt_bind(pt_t *pt, struct vmproc *who)
{
	int procslot, pdeslot;
	u32_t phys;
	void *pdes;
	int pagedir_pde;
	int slots_per_pde;
	int pages_per_pagedir = ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE;
	struct pdm *pdm;

	slots_per_pde = ARCH_VM_PT_ENTRIES / pages_per_pagedir;

	/* Basic sanity checks. */
	assert(who);
	assert(who->vm_flags & VMF_INUSE);
	assert(pt);

	procslot = who->vm_slot;
	pdm = &pagedir_mappings[procslot/slots_per_pde];
	pdeslot = procslot%slots_per_pde;
	pagedir_pde = pdm->pdeno;
	assert(pdeslot >= 0);
	assert(procslot < ELEMENTS(vmproc));
	assert(pdeslot < ARCH_VM_PT_ENTRIES / pages_per_pagedir);
	assert(pagedir_pde >= 0);

#if defined(__i386__)
	phys = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
	phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
#endif
	assert(pt->pt_dir_phys == phys);
	assert(!(pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	/* Update "page directory pagetable." */
#if defined(__i386__)
	pdm->page_directories[pdeslot] =
		phys | ARCH_VM_PDE_PRESENT|ARCH_VM_PTE_RW;
#elif defined(__arm__)
{
	int i;
	for (i = 0; i < pages_per_pagedir; i++) {
		pdm->page_directories[pdeslot*pages_per_pagedir+i] =
			(phys+i*VM_PAGE_SIZE)
			| ARCH_VM_PTE_PRESENT
			| ARCH_VM_PTE_RW
			| ARM_VM_PTE_CACHED
			| ARCH_VM_PTE_USER; //LSC FIXME
	}
}
#endif

	/* This is where the PDE's will be visible to the kernel
	 * in its address space.
	 */
	pdes = (void *) (pagedir_pde*ARCH_BIG_PAGE_SIZE +
#if defined(__i386__)
			pdeslot * VM_PAGE_SIZE);
#elif defined(__arm__)
			pdeslot * ARCH_PAGEDIR_SIZE);
#endif

	/* Tell kernel about new page table root. */
	return sys_vmctl_set_addrspace(who->vm_endpoint, pt->pt_dir_phys, pdes);
}
/*===========================================================================*
 *				pt_free					     *
 *===========================================================================*/
void pt_free(pt_t *pt)
{
/* Free memory associated with this pagetable. */
	int i;

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++)
		if(pt->pt_pt[i])
			vm_freepages((vir_bytes) pt->pt_pt[i], 1);

	return;
}
/*===========================================================================*
 *				pt_mapkernel				     *
 *===========================================================================*/
int pt_mapkernel(pt_t *pt)
{
	int i;
	int kern_pde = kern_start_pde;
	phys_bytes addr, mapped = 0;

	/* Any page table needs to map in the kernel address space. */
	assert(bigpage_ok);
	assert(kern_pde >= 0);

	/* pt_init() has made sure this is ok. */
	addr = kern_mb_mod->mod_start;

	/* Actually mapping in kernel */
	while(mapped < kern_size) {
#if defined(__i386__)
		pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
			ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
#elif defined(__arm__)
		pt->pt_dir[kern_pde] = (addr & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED
			| ARM_VM_SECTION_SUPER;
#endif
		kern_pde++;
		mapped += ARCH_BIG_PAGE_SIZE;
		addr += ARCH_BIG_PAGE_SIZE;
	}

	/* Kernel also wants to know about all page directories. */
	{
		int pd;
		for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
			struct pdm *pdm = &pagedir_mappings[pd];

			assert(pdm->pdeno > 0);
			assert(pdm->pdeno > kern_pde);
			pt->pt_dir[pdm->pdeno] = pdm->val;
		}
	}

	/* Kernel also wants various mappings of its own. */
	for(i = 0; i < kernmappings; i++) {
		int r;
		if((r=pt_writemap(NULL, pt,
			kern_mappings[i].vir_addr,
			kern_mappings[i].phys_addr,
			kern_mappings[i].len,
			kern_mappings[i].flags, 0)) != OK) {
			return r;
		}
	}

	return OK;
}
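/* Report how many pages VM has currently allocated for its own use. */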
int get_vm_self_pages(void) { return vm_self_pages; }