vm: replace phys avl by array
servers/vm/arch/i386/pagetable.c
#define _SYSTEM 1
#define _POSIX_SOURCE 1

#include <minix/callnr.h>
#include <minix/com.h>
#include <minix/config.h>
#include <minix/const.h>
#include <minix/ds.h>
#include <minix/endpoint.h>
#include <minix/keymap.h>
#include <minix/minlib.h>
#include <minix/type.h>
#include <minix/ipc.h>
#include <minix/sysutil.h>
#include <minix/syslib.h>
#include <minix/safecopies.h>
#include <minix/cpufeature.h>
#include <minix/bitmap.h>
#include <minix/debug.h>

#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <env.h>
#include <stdio.h>
#include <fcntl.h>

#include "proto.h"
#include "glo.h"
#include "util.h"
#include "vm.h"
#include "sanitycheck.h"
static int vm_self_pages;

/* PDE used to map in kernel, kernel physical address. */
static int pagedir_pde = -1;
static u32_t global_bit = 0, pagedir_pde_val;

static multiboot_module_t *kern_mb_mod = NULL;
static size_t kern_size = 0;
static int kern_start_pde = -1;

/* big page size available in hardware? */
static int bigpage_ok = 1;

/* Our process table entry. */
struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
/* Spare memory, ready to go after initialization, to avoid a
 * circular dependency on allocating memory and writing it into VM's
 * page table.
 */
#if SANITYCHECKS
#define SPAREPAGES 100
#define STATIC_SPAREPAGES 90
#else
#define SPAREPAGES 20
#define STATIC_SPAREPAGES 15
#endif

#define SPAREPAGEDIRS 11
#define STATIC_SPAREPAGEDIRS 10

int missing_sparedirs = SPAREPAGEDIRS;
static struct {
	void *pagedir;
	phys_bytes phys;
} sparepagedirs[SPAREPAGEDIRS];

int missing_spares = SPAREPAGES;
static struct {
	void *page;
	phys_bytes phys;
} sparepages[SPAREPAGES];
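/* Illustrative sketch (not built): the spare pool exists to break the
 * bootstrap cycle described above. Before pt_init() has completed,
 * vm_allocpage() cannot map freshly allocated memory into VM's own page
 * table, so it hands out one of these kernel-premapped pages instead:
 */
#if 0
	phys_bytes ph;
	void *p = vm_allocpage(&ph, VMP_PAGETABLE);
	/* pre-pt_init: p comes from sparepages[] (already mapped);
	 * post-pt_init: p comes from alloc_mem() + pt_writemap(). */
#endif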
extern char _end;
#define is_staticaddr(v) ((vir_bytes) (v) < (vir_bytes) &_end)

#define MAX_KERNMAPPINGS 10
static struct {
	phys_bytes phys_addr;	/* Physical addr. */
	phys_bytes len;		/* Length in bytes. */
	vir_bytes vir_addr;	/* Offset in page table. */
	int flags;
} kern_mappings[MAX_KERNMAPPINGS];
int kernmappings = 0;
/* Clicks must be pages, as
 * - they must be page aligned to map them
 * - they must be a multiple of the page size
 * - it's inconvenient to have them bigger than pages, because we often want
 *   just one page
 * May as well require them to be equal then.
 */
#if CLICK_SIZE != VM_PAGE_SIZE
#error CLICK_SIZE must be page size.
#endif
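/* Illustrative sketch (not built): because CLICK_SIZE == VM_PAGE_SIZE,
 * click arithmetic and page arithmetic can be mixed freely, as this
 * file does when allocating and freeing single pages:
 */
#if 0
	phys_bytes newpage = alloc_mem(1, 0);	/* one click... */
	phys_bytes addr = CLICK2ABS(newpage);	/* ...is one page-aligned page */
	free_mem(ABS2CLICK(addr), 1);
#endif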
/* Page table that contains pointers to all page directories. */
phys_bytes page_directories_phys;
u32_t *page_directories = NULL;

static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES]
	__aligned(VM_PAGE_SIZE);

#if defined(__arm__)
static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE);
#endif
#if SANITYCHECKS
/*===========================================================================*
 *				pt_sanitycheck		     		     *
 *===========================================================================*/
void pt_sanitycheck(pt_t *pt, char *file, int line)
{
/* Basic pt sanity check. */
	int slot;

	MYASSERT(pt);
	MYASSERT(pt->pt_dir);
	MYASSERT(pt->pt_dir_phys);

	for(slot = 0; slot < ELEMENTS(vmproc); slot++) {
		if(pt == &vmproc[slot].vm_pt)
			break;
	}

	if(slot >= ELEMENTS(vmproc)) {
		panic("pt_sanitycheck: passed pt not in any proc");
	}

	MYASSERT(usedpages_add(pt->pt_dir_phys, VM_PAGE_SIZE) == OK);
}
#endif
/*===========================================================================*
 *				findhole		     		     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
/* Find a space in the virtual address space of VM. */
	u32_t curv;
	int pde = 0, try_restart;
	static u32_t lastv = 0;
	pt_t *pt = &vmprocess->vm_pt;
	vir_bytes vmin, vmax;
#if defined(__arm__)
	u32_t holev;
#endif

	vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
	vmin += 1024*1024*1024;	/* reserve 1GB virtual address space for VM heap */
	vmin &= ARCH_VM_ADDR_MASK;
	vmax = VM_STACKTOP;

	/* Input sanity check. */
	assert(vmin + VM_PAGE_SIZE >= vmin);
	assert(vmax >= vmin + VM_PAGE_SIZE);
	assert((vmin % VM_PAGE_SIZE) == 0);
	assert((vmax % VM_PAGE_SIZE) == 0);
#if defined(__arm__)
	assert(pages > 0);
#endif

#if SANITYCHECKS
	curv = ((u32_t) random()) % ((vmax - vmin)/VM_PAGE_SIZE);
	curv *= VM_PAGE_SIZE;
	curv += vmin;
#else
	curv = lastv;
	if(curv < vmin || curv >= vmax)
		curv = vmin;
#endif
	try_restart = 1;

	/* Start looking for a free page starting at vmin. */
	while(curv < vmax) {
		int pte;
#if defined(__arm__)
		int i, nohole;
#endif

		assert(curv >= vmin);
		assert(curv < vmax);

#if defined(__i386__)
		pde = I386_VM_PDE(curv);
		pte = I386_VM_PTE(curv);
#elif defined(__arm__)
		holev = curv; /* the candidate hole */
		nohole = 0;
		for (i = 0; i < pages && !nohole; ++i) {
			if(curv >= vmax) {
				break;
			}
#endif

#if defined(__i386__)
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) ||
		   !(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) {
#elif defined(__arm__)
			pde = ARM_VM_PDE(curv);
			pte = ARM_VM_PTE(curv);

			/* if page present, no hole */
			if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
			   (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
				nohole = 1;

			/* if not contiguous, no hole */
			if (curv != holev + i * VM_PAGE_SIZE)
				nohole = 1;

			curv+=VM_PAGE_SIZE;
		}

		/* there's a large enough hole */
		if (!nohole && i == pages) {
#endif
			lastv = curv;
#if defined(__i386__)
			return curv;
#elif defined(__arm__)
			return holev;
#endif
		}

#if defined(__i386__)
		curv+=VM_PAGE_SIZE;

#elif defined(__arm__)
		/* Reset curv */
#endif
		if(curv >= vmax && try_restart) {
			curv = vmin;
			try_restart = 0;
		}
	}

	printf("VM: out of virtual address space in vm\n");

	return NO_MEM;
}
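/* Usage sketch (not built): findhole() yields a page-aligned virtual
 * address inside VM's own address space, or NO_MEM. The caller is
 * expected to back it with physical memory, as vm_allocpages() does:
 */
#if 0
	vir_bytes loc = findhole(1);
	if(loc == NO_MEM)
		return NULL;
	/* ...then alloc_mem() + pt_writemap() at loc... */
#endif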
/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
	assert(!(vir % VM_PAGE_SIZE));

	if(is_staticaddr(vir)) {
		printf("VM: not freeing static page\n");
		return;
	}

	if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
		MAP_NONE, pages*VM_PAGE_SIZE, 0,
		WMF_OVERWRITE | WMF_FREE) != OK)
		panic("vm_freepages: pt_writemap failed");

	vm_self_pages--;

#if SANITYCHECKS
	/* If SANITYCHECKS are on, flush tlb so accessing freed pages is
	 * always trapped, also if not in tlb.
	 */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
#endif
}
/*===========================================================================*
 *				vm_getsparepage		     		     *
 *===========================================================================*/
static void *vm_getsparepage(phys_bytes *phys)
{
	int s;
	assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
	for(s = 0; s < SPAREPAGES; s++) {
		if(sparepages[s].page) {
			void *sp;
			sp = sparepages[s].page;
			*phys = sparepages[s].phys;
			sparepages[s].page = NULL;
			missing_spares++;
			assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
			return sp;
		}
	}
	printf("no spare found, %d missing\n", missing_spares);
	return NULL;
}
/*===========================================================================*
 *				vm_getsparepagedir	     		     *
 *===========================================================================*/
static void *vm_getsparepagedir(phys_bytes *phys)
{
	int s;
	assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		if(sparepagedirs[s].pagedir) {
			void *sp;
			sp = sparepagedirs[s].pagedir;
			*phys = sparepagedirs[s].phys;
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
			return sp;
		}
	}
	return NULL;
}
/*===========================================================================*
 *				vm_checkspares		     		     *
 *===========================================================================*/
static void *vm_checkspares(void)
{
	int s, n = 0;
	static int total = 0, worst = 0;
	assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
	for(s = 0; s < SPAREPAGES && missing_spares > 0; s++) {
		if(!sparepages[s].page) {
			n++;
			if((sparepages[s].page = vm_allocpage(&sparepages[s].phys,
				VMP_SPARE))) {
				missing_spares--;
				assert(missing_spares >= 0);
				assert(missing_spares <= SPAREPAGES);
			} else {
				printf("VM: warning: couldn't get new spare page\n");
			}
		}
	}
	if(worst < n) worst = n;
	total += n;

	return NULL;
}
#if defined(__arm__)
/*===========================================================================*
 *				vm_checksparedirs	     		     *
 *===========================================================================*/
static void *vm_checksparedirs(void)
{
	int s, n = 0;
	static int total = 0, worst = 0;
	assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS && missing_sparedirs > 0; s++)
		if(!sparepagedirs[s].pagedir) {
			n++;
			if((sparepagedirs[s].pagedir = vm_allocpage(&sparepagedirs[s].phys,
				VMP_SPARE))) {
				missing_sparedirs--;
				assert(missing_sparedirs >= 0);
				assert(missing_sparedirs <= SPAREPAGEDIRS);
			} else {
				printf("VM: warning: couldn't get new spare pagedir\n");
			}
		}

	if(worst < n) worst = n;
	total += n;

	return NULL;
}
#endif
static int pt_init_done;

/*===========================================================================*
 *				vm_allocpage		     		     *
 *===========================================================================*/
void *vm_allocpages(phys_bytes *phys, int reason, int pages)
{
/* Allocate a page for use by VM itself. */
	phys_bytes newpage;
	vir_bytes loc;
	pt_t *pt;
	int r;
	static int level = 0;
	void *ret;
	u32_t mem_flags = 0;

	pt = &vmprocess->vm_pt;
	assert(reason >= 0 && reason < VMP_CATEGORIES);

	assert(pages > 0);

	level++;

	assert(level >= 1);
	assert(level <= 2);

	if((level > 1) || !pt_init_done) {
		void *s;
#if defined(__i386__)
		s=vm_getsparepage(phys);
#elif defined(__arm__)
		if(pages == 1) s=vm_getsparepage(phys);
		else if(pages == 4) s=vm_getsparepagedir(phys);
		else panic("%d pages", pages);
#endif
		level--;
		if(!s) {
			util_stacktrace();
			printf("VM: warning: out of spare pages\n");
		}
		if(!is_staticaddr(s)) vm_self_pages++;
		return s;
	}

#if defined(__arm__)
	if (reason == VMP_PAGEDIR) {
		mem_flags |= PAF_ALIGN16K;
	}
#endif

	/* VM does have a pagetable, so get a page and map it in there.
	 * Where in our virtual address space can we put it?
	 */
	loc = findhole(pages);
	if(loc == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: findhole failed\n");
		return NULL;
	}

	/* Allocate page of memory for use by VM. As VM
	 * is trusted, we don't have to pre-clear it.
	 */
	if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) {
		level--;
		printf("VM: vm_allocpage: alloc_mem failed\n");
		return NULL;
	}

	*phys = CLICK2ABS(newpage);

	/* Map this page into our address space. */
	if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
		| ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
#endif
		, 0)) != OK) {
		free_mem(newpage, pages);
		printf("vm_allocpage writemap failed\n");
		level--;
		return NULL;
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	level--;

	/* Return user-space-ready pointer to it. */
	ret = (void *) loc;

	vm_self_pages++;
	return ret;
}
void *vm_allocpage(phys_bytes *phys, int reason)
{
	return vm_allocpages(phys, reason, 1);
}
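/* Usage sketch (not built): callers get the virtual address as the
 * return value and the physical address through the out parameter,
 * e.g. when allocating a page table page as pt_ptalloc() does below:
 */
#if 0
	phys_bytes ph;
	u32_t *ptpage = vm_allocpage(&ph, VMP_PAGETABLE);
	if(!ptpage)
		return ENOMEM;
#endif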
/*===========================================================================*
 *				vm_pagelock		     		     *
 *===========================================================================*/
void vm_pagelock(void *vir, int lockflag)
{
/* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
	vir_bytes m = (vir_bytes) vir;
	int r;
	u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER;
	pt_t *pt;

	pt = &vmprocess->vm_pt;

	assert(!(m % VM_PAGE_SIZE));

	if(!lockflag)
		flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
	else
		flags |= ARCH_VM_PTE_RO;
	flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
#endif

	/* Update flags. */
	if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE,
		flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
		panic("vm_lockpage: pt_writemap failed");
	}

	if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed: %d", r);
	}

	return;
}
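/* Usage sketch (not built, hypothetical caller): pages handed out by
 * vm_allocpage() can be kept read-only while idle and unlocked only
 * around modifications:
 */
#if 0
	vm_pagelock(ptpage, 0);		/* make writable */
	ptpage[pte] = entry;
	vm_pagelock(ptpage, 1);		/* back to read-only */
#endif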
/*===========================================================================*
 *				vm_addrok		     		     *
 *===========================================================================*/
int vm_addrok(void *vir, int writeflag)
{
	pt_t *pt = &vmprocess->vm_pt;
	int pde, pte;
	vir_bytes v = (vir_bytes) vir;

#if defined(__i386__)
	pde = I386_VM_PDE(v);
	pte = I386_VM_PTE(v);
#elif defined(__arm__)
	pde = ARM_VM_PDE(v);
	pte = ARM_VM_PTE(v);
#endif

	if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
		printf("addr not ok: missing pde %d\n", pde);
		return 0;
	}

#if defined(__i386__)
	if(writeflag &&
		!(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) {
		printf("addr not ok: pde %d present but pde unwritable\n", pde);
		return 0;
	}

#endif
	if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
		printf("addr not ok: missing pde %d / pte %d\n",
			pde, pte);
		return 0;
	}

#if defined(__i386__)
	if(writeflag &&
		!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
		printf("addr not ok: pde %d / pte %d present but unwritable\n",
#elif defined(__arm__)
	if(!writeflag &&
		!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
		printf("addr not ok: pde %d / pte %d present but writable\n",
#endif
			pde, pte);
		return 0;
	}

	return 1;
}
/*===========================================================================*
 *				pt_ptalloc		     		     *
 *===========================================================================*/
static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
{
/* Allocate a page table and write its address into the page directory. */
	int i;
	phys_bytes pt_phys;

	/* Argument must make sense. */
	assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);
	assert(!(flags & ~(PTF_ALLFLAGS)));

	/* We don't expect to overwrite page directory entry, nor
	 * storage for the page table.
	 */
	assert(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT));
	assert(!pt->pt_pt[pde]);

	/* Get storage for the page table. */
	if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
		return ENOMEM;

	for(i = 0; i < ARCH_VM_PT_ENTRIES; i++)
		pt->pt_pt[pde][i] = 0;	/* Empty entry. */

	/* Make page directory entry.
	 * The PDE is always 'present,' 'writable,' and 'user accessible,'
	 * relying on the PTE for protection.
	 */
#if defined(__i386__)
	pt->pt_dir[pde] = (pt_phys & ARCH_VM_ADDR_MASK) | flags
		| ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
#elif defined(__arm__)
	pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
		| ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
#endif

	return OK;
}
/*===========================================================================*
 *			    pt_ptalloc_in_range		     		     *
 *===========================================================================*/
int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
	u32_t flags, int verify)
{
/* Allocate all the page tables in the range specified. */
	int pde, first_pde, last_pde;

#if defined(__i386__)
	first_pde = I386_VM_PDE(start);
	last_pde = I386_VM_PDE(end-1);
#elif defined(__arm__)
	first_pde = ARM_VM_PDE(start);
	last_pde = ARM_VM_PDE(end-1);
#endif
	assert(first_pde >= 0);
	assert(last_pde < ARCH_VM_DIR_ENTRIES);

	/* Scan all page-directory entries in the range. */
	for(pde = first_pde; pde <= last_pde; pde++) {
		assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			int r;
			if(verify) {
				printf("pt_ptalloc_in_range: no pde %d\n", pde);
				return EFAULT;
			}
			assert(!pt->pt_dir[pde]);
			if((r=pt_ptalloc(pt, pde, flags)) != OK) {
				/* Couldn't do (complete) mapping.
				 * Don't bother freeing any previously
				 * allocated page tables, they're
				 * still writable, don't point to nonsense,
				 * and pt_ptalloc leaves the directory
				 * and other data in a consistent state.
				 */
				return r;
			}
		}
		assert(pt->pt_dir[pde]);
		assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);
	}

	return OK;
}
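/* Illustrative sketch (not built): pt_writemap() below relies on this
 * pre-allocation pass so that, once PTE writing starts, no allocation
 * can fail halfway and leave the range partially backed:
 */
#if 0
	if(pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, 0) != OK)
		return ENOMEM;	/* no PTE has been touched yet */
#endif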
static char *ptestr(u32_t pte)
{
#define FLAG(constant, name) {					\
	if(pte & (constant)) { strcat(str, name); strcat(str, " "); } \
}

	static char str[30];
	if(!(pte & ARCH_VM_PTE_PRESENT)) {
		return "not present";
	}
	str[0] = '\0';
#if defined(__i386__)
	FLAG(ARCH_VM_PTE_RW, "W");
#elif defined(__arm__)
	if(pte & ARCH_VM_PTE_RO) {
		strcat(str, "R ");
	} else {
		strcat(str, "W ");
	}
#endif
	FLAG(ARCH_VM_PTE_USER, "U");
#if defined(__i386__)
	FLAG(I386_VM_PWT, "PWT");
	FLAG(I386_VM_PCD, "PCD");
	FLAG(I386_VM_ACC, "ACC");
	FLAG(I386_VM_DIRTY, "DIRTY");
	FLAG(I386_VM_PS, "PS");
	FLAG(I386_VM_GLOBAL, "G");
	FLAG(I386_VM_PTAVAIL1, "AV1");
	FLAG(I386_VM_PTAVAIL2, "AV2");
	FLAG(I386_VM_PTAVAIL3, "AV3");
#elif defined(__arm__)
	FLAG(ARM_VM_PTE_SUPER, "S");
	FLAG(ARM_VM_PTE_SHAREABLE, "SH");
	FLAG(ARM_VM_PTE_WB, "WB");
	FLAG(ARM_VM_PTE_WT, "WT");
#endif

	return str;
}
/*===========================================================================*
 *			     pt_map_in_range		     		     *
 *===========================================================================*/
int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
	vir_bytes start, vir_bytes end)
{
/* Transfer all the mappings from the pt of the source process to the pt of
 * the destination process in the range specified.
 */
	int pde, pte;
	vir_bytes viraddr;
	pt_t *pt, *dst_pt;

	pt = &src_vmp->vm_pt;
	dst_pt = &dst_vmp->vm_pt;

	end = end ? end : VM_DATATOP;
	assert(start % VM_PAGE_SIZE == 0);
	assert(end % VM_PAGE_SIZE == 0);
#if defined(__i386__)
	assert(start <= end);
	assert(I386_VM_PDE(end) < ARCH_VM_DIR_ENTRIES);
#elif defined(__arm__)
	assert(ARM_VM_PDE(start) >= 0 && start <= end);
	assert(ARM_VM_PDE(end) < ARCH_VM_DIR_ENTRIES);
#endif

#if LU_DEBUG
	printf("VM: pt_map_in_range: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
	printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
#if defined(__i386__)
		start, I386_VM_PDE(start), I386_VM_PTE(start),
		end, I386_VM_PDE(end), I386_VM_PTE(end));
#elif defined(__arm__)
		start, ARM_VM_PDE(start), ARM_VM_PTE(start),
		end, ARM_VM_PDE(end), ARM_VM_PTE(end));
#endif
#endif

	/* Scan all page-table entries in the range. */
	for(viraddr = start; viraddr <= end; viraddr += VM_PAGE_SIZE) {
#if defined(__i386__)
		pde = I386_VM_PDE(viraddr);
#elif defined(__arm__)
		pde = ARM_VM_PDE(viraddr);
#endif
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			if(viraddr == VM_DATATOP) break;
			continue;
		}
#if defined(__i386__)
		pte = I386_VM_PTE(viraddr);
#elif defined(__arm__)
		pte = ARM_VM_PTE(viraddr);
#endif
		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			if(viraddr == VM_DATATOP) break;
			continue;
		}

		/* Transfer the mapping. */
		dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];

		if(viraddr == VM_DATATOP) break;
	}

	return OK;
}
/*===========================================================================*
 *				pt_ptmap		     		     *
 *===========================================================================*/
int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
{
/* Transfer mappings to page dir and page tables from source process and
 * destination process. Make sure all the mappings are above the stack, not
 * to corrupt valid mappings in the data segment of the destination process.
 */
	int pde, r;
	phys_bytes physaddr;
	vir_bytes viraddr;
	pt_t *pt;

	pt = &src_vmp->vm_pt;

#if LU_DEBUG
	printf("VM: pt_ptmap: src = %d, dst = %d\n",
		src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
#endif

	/* Transfer mapping to the page directory. */
	viraddr = (vir_bytes) pt->pt_dir;
	physaddr = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#if defined(__i386__)
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
#elif defined(__arm__)
	if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
#endif
		WMF_OVERWRITE)) != OK) {
		return r;
	}
#if LU_DEBUG
	printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
		viraddr, physaddr);
#endif

	/* Scan all non-reserved page-directory entries. */
	for(pde=0; pde < ARCH_VM_DIR_ENTRIES; pde++) {
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			continue;
		}

		/* Transfer mapping to the page table. */
		viraddr = (vir_bytes) pt->pt_pt[pde];
#if defined(__i386__)
		physaddr = pt->pt_dir[pde] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK;
#endif
		if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
			ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#ifdef __arm__
			| ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
#endif
			,
			WMF_OVERWRITE)) != OK) {
			return r;
		}
	}

	return OK;
}
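/* Usage sketch (not built, hypothetical names): during live update of
 * VM itself, the new instance first maps the old instance's page
 * directory and page tables into its own address space:
 */
#if 0
	if(pt_ptmap(old_vmp, new_vmp) != OK)
		panic("pt_ptmap failed");
#endif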
void pt_clearmapcache(void)
{
	/* Make sure kernel will invalidate tlb when using current
	 * pagetable (i.e. vm's) to make new mappings before new cr3
	 * is loaded.
	 */
	if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK)
		panic("VMCTL_CLEARMAPCACHE failed");
}
/*===========================================================================*
 *				pt_writemap		     		     *
 *===========================================================================*/
int pt_writemap(struct vmproc * vmp,
	pt_t *pt,
	vir_bytes v,
	phys_bytes physaddr,
	size_t bytes,
	u32_t flags,
	u32_t writemapflags)
{
/* Write mapping into page table. Allocate a new page table if necessary. */
/* Page directory and table entries for this virtual address. */
	int p, pages;
	int verify = 0;
	int ret = OK;

#ifdef CONFIG_SMP
	int vminhibit_clear = 0;
	/* FIXME
	 * don't do it everytime, stop the process only on the first change and
	 * resume the execution on the last change. Do in a wrapper of this
	 * function
	 */
	if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING)) {
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
		vminhibit_clear = 1;
	}
#endif

	if(writemapflags & WMF_VERIFY)
		verify = 1;

	assert(!(bytes % VM_PAGE_SIZE));
	assert(!(flags & ~(PTF_ALLFLAGS)));

	pages = bytes / VM_PAGE_SIZE;

	/* MAP_NONE means to clear the mapping. It doesn't matter
	 * what's actually written into the PTE if PRESENT
	 * isn't on, so we can just write MAP_NONE into it.
	 */
	assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT));
	assert(physaddr != MAP_NONE || !flags);

	/* First make sure all the necessary page tables are allocated,
	 * before we start writing in any of them, because it's a pain
	 * to undo our work properly.
	 */
	ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify);
	if(ret != OK) {
		printf("VM: writemap: pt_ptalloc_in_range failed\n");
		goto resume_exit;
	}

	/* Now write in them. */
	for(p = 0; p < pages; p++) {
		u32_t entry;
#if defined(__i386__)
		int pde = I386_VM_PDE(v);
		int pte = I386_VM_PTE(v);
#elif defined(__arm__)
		int pde = ARM_VM_PDE(v);
		int pte = ARM_VM_PTE(v);
#endif

		assert(!(v % VM_PAGE_SIZE));
		assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT);

		/* We do not expect it to be a bigpage. */
		assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE));

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert(pt->pt_pt[pde]);

#if SANITYCHECKS
		/* We don't expect to overwrite a page. */
		if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
			assert(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT));
#endif
		if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
#if defined(__i386__)
			physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
			physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
#endif
		}

		if(writemapflags & WMF_FREE) {
			free_mem(ABS2CLICK(physaddr), 1);
		}

		/* Entry we will write. */
#if defined(__i386__)
		entry = (physaddr & ARCH_VM_ADDR_MASK) | flags;
#elif defined(__arm__)
		entry = (physaddr & ARM_VM_PTE_MASK) | flags;
#endif

		if(verify) {
			u32_t maskedentry;
			maskedentry = pt->pt_pt[pde][pte];
#if defined(__i386__)
			maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY);
#endif
			/* Verify pagetable entry. */
			if(entry & ARCH_VM_PTE_RW) {
				/* If we expect a writable page, allow a readonly page. */
				maskedentry |= ARCH_VM_PTE_RW;
			}
			if(maskedentry != entry) {
				printf("pt_writemap: mismatch: ");
#if defined(__i386__)
				if((entry & ARCH_VM_ADDR_MASK) !=
					(maskedentry & ARCH_VM_ADDR_MASK)) {
#elif defined(__arm__)
				if((entry & ARM_VM_PTE_MASK) !=
					(maskedentry & ARM_VM_PTE_MASK)) {
#endif
					printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
						(long)entry, (long)maskedentry);
				} else printf("phys ok; ");
				printf(" flags: found %s; ",
					ptestr(pt->pt_pt[pde][pte]));
				printf(" masked %s; ",
					ptestr(maskedentry));
				printf(" expected %s\n", ptestr(entry));
				ret = EFAULT;
				goto resume_exit;
			}
		} else {
			/* Write pagetable entry. */
			pt->pt_pt[pde][pte] = entry;
		}

		physaddr += VM_PAGE_SIZE;
		v += VM_PAGE_SIZE;
	}

resume_exit:

#ifdef CONFIG_SMP
	if (vminhibit_clear) {
		assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
			!(vmp->vm_flags & VMF_EXITING));
		sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
	}
#endif

	return ret;
}
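/* Usage sketch (not built, hypothetical vaddr/paddr): map a single
 * physical page read-write at a chosen virtual address, replacing any
 * existing mapping:
 */
#if 0
	if(pt_writemap(vmp, &vmp->vm_pt, vaddr, paddr, VM_PAGE_SIZE,
		ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
		WMF_OVERWRITE) != OK)
		panic("pt_writemap failed");
#endif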
/*===========================================================================*
 *				pt_checkrange		     		     *
 *===========================================================================*/
int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes,
	int write)
{
	int p, pages;

	assert(!(bytes % VM_PAGE_SIZE));

	pages = bytes / VM_PAGE_SIZE;

	for(p = 0; p < pages; p++) {
#if defined(__i386__)
		int pde = I386_VM_PDE(v);
		int pte = I386_VM_PTE(v);
#elif defined(__arm__)
		int pde = ARM_VM_PDE(v);
		int pte = ARM_VM_PTE(v);
#endif

		assert(!(v % VM_PAGE_SIZE));
		assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES);
		assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES);

		/* Page table has to be there. */
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT))
			return EFAULT;

		/* Make sure page directory entry for this page table
		 * is marked present and page table entry is available.
		 */
		assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]);

		if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
			return EFAULT;
		}

#if defined(__i386__)
		if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
#elif defined(__arm__)
		if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
#endif
			return EFAULT;
		}

		v += VM_PAGE_SIZE;
	}

	return OK;
}
/*===========================================================================*
 *				pt_new			     		     *
 *===========================================================================*/
int pt_new(pt_t *pt)
{
/* Allocate a pagetable root. Allocate a page-aligned page directory
 * and set them to 0 (indicating no page tables are allocated). Lookup
 * its physical address as we'll need that in the future. Verify it's
 * page-aligned.
 */
	int i, r;

	/* Don't ever re-allocate/re-move a certain process slot's
	 * page directory once it's been created. This is a fraction
	 * faster, but also avoids having to invalidate the page
	 * mappings from in-kernel page tables pointing to
	 * the page directories (the page_directories data).
	 */
	if(!pt->pt_dir &&
	  !(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys,
		VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) {
		return ENOMEM;
	}

	assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) {
		pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */
		pt->pt_pt[i] = NULL;
	}

	/* Where to start looking for free virtual address space? */
	pt->pt_virtop = 0;

	/* Map in kernel. */
	if((r=pt_mapkernel(pt)) != OK)
		return r;

	return OK;
}
static int freepde(void)
{
	int p = kernel_boot_info.freepde_start++;
	assert(kernel_boot_info.freepde_start < ARCH_VM_DIR_ENTRIES);
	return p;
}
/*===========================================================================*
 *				pt_init			     		     *
 *===========================================================================*/
void pt_init(void)
{
	pt_t *newpt;
	int s, r, p;
	int global_bit_ok = 0;
	vir_bytes sparepages_mem;
#if defined(__arm__)
	vir_bytes sparepagedirs_mem;
#endif
	static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
	int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
	u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
	u32_t myttbr;
#endif

	/* Find what the physical location of the kernel is. */
	assert(m >= 0);
	assert(m < kernel_boot_info.mods_with_kernel);
	assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
	kern_mb_mod = &kernel_boot_info.module_list[m];
	kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
	assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
	assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
	kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

	/* Get ourselves spare pages. */
	sparepages_mem = (vir_bytes) static_sparepages;
	assert(!(sparepages_mem % VM_PAGE_SIZE));
#if defined(__arm__)
	/* Get ourselves spare pagedirs. */
	sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
	assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

	/* Spare pages are used to allocate memory before VM has its own page
	 * table that things (i.e. arbitrary physical memory) can be mapped into.
	 * We get it by pre-allocating it in our bss (allocated and mapped in by
	 * the kernel) in static_sparepages. We also need the physical addresses
	 * though; we look them up now so they are ready for use.
	 */
#if defined(__arm__)
	missing_sparedirs = 0;
	assert(STATIC_SPAREPAGEDIRS < SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			ARCH_PAGEDIR_SIZE, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		if(s >= STATIC_SPAREPAGEDIRS) {
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			continue;
		}
		sparepagedirs[s].pagedir = (void *) v;
		sparepagedirs[s].phys = ph;
	}
#endif

	missing_spares = 0;
	assert(STATIC_SPAREPAGES < SPAREPAGES);
	for(s = 0; s < SPAREPAGES; s++) {
		vir_bytes v = (sparepages_mem + s*VM_PAGE_SIZE);
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		if(s >= STATIC_SPAREPAGES) {
			sparepages[s].page = NULL;
			missing_spares++;
			continue;
		}
		sparepages[s].page = (void *) v;
		sparepages[s].phys = ph;
	}
#if defined(__i386__)
	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;
#endif

	/* Allocate us a page table in which to remember page directory
	 * pointers.
	 */
	if(!(page_directories = vm_allocpage(&page_directories_phys,
		VMP_PAGETABLE)))
		panic("no virt addr for vm mappings");

	memset(page_directories, 0, VM_PAGE_SIZE);

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = freepde();
		offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
				panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			kern_mappings[index].flags = flags;
#if defined(__i386__)
			kern_mappings[index].vir_addr = offset;
#elif defined(__arm__)
			kern_mappings[index].vir_addr = addr;
#endif
			kern_mappings[index].flags =
				ARCH_VM_PTE_PRESENT;
			if(flags & VMMF_UNCACHED)
#if defined(__i386__)
				kern_mappings[index].flags |= PTF_NOCACHE;
#elif defined(__arm__)
				kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
			else
				kern_mappings[index].flags |=
					ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
#endif
			if(flags & VMMF_USER)
				kern_mappings[index].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
			else
				kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
#endif
			if(flags & VMMF_WRITE)
				kern_mappings[index].flags |= ARCH_VM_PTE_RW;
#if defined(__i386__)
			if(flags & VMMF_GLO)
				kern_mappings[index].flags |= I386_VM_GLOBAL;
#elif defined(__arm__)
			else
				kern_mappings[index].flags |= ARCH_VM_PTE_RO;
#endif
			if(addr % VM_PAGE_SIZE)
				panic("VM: addr unaligned: %d", addr);
			if(len % VM_PAGE_SIZE)
				panic("VM: len unaligned: %d", len);
			vir = offset;
			if(sys_vmctl_reply_mapping(index, vir) != OK)
				panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;
		}
	}
	/* Find a PDE below processes available for mapping in the
	 * page directories.
	 */
	pagedir_pde = freepde();
#if defined(__i386__)
	pagedir_pde_val = (page_directories_phys & ARCH_VM_ADDR_MASK) |
			ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
	pagedir_pde_val = (page_directories_phys & ARCH_VM_PDE_MASK) |
			ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
#endif

	/* Allright. Now. We have to make our own page directory and page tables,
	 * that the kernel has already set up, accessible to us. It's easier to
	 * understand if we just copy all the required pages (i.e. page directory
	 * and page tables), and set up the pointers as if VM had done it itself.
	 *
	 * This allocation will happen without using any page table, and just
	 * uses spare pages.
	 */
	newpt = &vmprocess->vm_pt;
	if(pt_new(newpt) != OK)
		panic("vm pt_new failed");

	/* Get our current pagedir so we can see it. */
#if defined(__i386__)
	if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
	if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
		panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
	if(sys_vircopy(NONE, mypdbr, SELF,
		(vir_bytes) currentpagedir, VM_PAGE_SIZE) != OK)
#elif defined(__arm__)
	if(sys_vircopy(NONE, myttbr, SELF,
		(vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE) != OK)
#endif
		panic("VM: sys_vircopy failed");

	/* We have mapped in kernel ourselves; now copy mappings for VM
	 * that kernel made, including allocations for BSS. Skip identity
	 * mapping bits; just map in VM.
	 */
	for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
		u32_t entry = currentpagedir[p];
		phys_bytes ptaddr_kern, ptaddr_us;

		/* BIGPAGEs are kernel mapping (do ourselves) or boot
		 * identity mapping (don't want).
		 */
		if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
		if((entry & ARCH_VM_BIGPAGE)) continue;

		if(pt_ptalloc(newpt, p, 0) != OK)
			panic("pt_ptalloc failed");
		assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
		ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		ptaddr_kern = entry & ARCH_VM_PDE_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

		/* Copy kernel-initialized pagetable contents into our
		 * normally accessible pagetable.
		 */
		if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
			panic("pt_init: abscopy failed");
	}

	/* Inform kernel vm has a newly built page table. */
	assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
	pt_bind(newpt, &vmproc[VM_PROC_NR]);

	pt_init_done = 1;

	vm_checkspares();

	/* All OK. */
	return;
}
/*===========================================================================*
 *				pt_bind			     		     *
 *===========================================================================*/
int pt_bind(pt_t *pt, struct vmproc *who)
{
	int slot;
	u32_t phys;
	void *pdes;
	int pages_per_pagedir = ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE;

	/* Basic sanity checks. */
	assert(who);
	assert(who->vm_flags & VMF_INUSE);
	assert(pt);

	assert(pagedir_pde >= 0);

	slot = who->vm_slot;
	assert(slot >= 0);
	assert(slot < ELEMENTS(vmproc));
	assert(slot < ARCH_VM_PT_ENTRIES / pages_per_pagedir);

#if defined(__i386__)
	phys = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
	phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
#endif
	assert(pt->pt_dir_phys == phys);
	assert(!(pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	/* Update "page directory pagetable." */
#if defined(__i386__)
	page_directories[slot] = phys | ARCH_VM_PDE_PRESENT|ARCH_VM_PTE_RW;
#elif defined(__arm__)
	{
		int i;
		for (i = 0; i < pages_per_pagedir; i++)
			page_directories[slot*pages_per_pagedir+i] =
				(phys+i*VM_PAGE_SIZE) |
				ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_RW |
				ARCH_VM_PTE_USER;
	}
#endif

	/* This is where the PDE's will be visible to the kernel
	 * in its address space.
	 */
	pdes = (void *) (pagedir_pde*ARCH_BIG_PAGE_SIZE +
#if defined(__i386__)
			slot * VM_PAGE_SIZE);
#elif defined(__arm__)
			slot * ARCH_PAGEDIR_SIZE);
#endif

#if 0
	printf("VM: slot %d endpoint %d has pde val 0x%lx at kernel address 0x%lx\n",
		slot, who->vm_endpoint, page_directories[slot], pdes);
#endif
	/* Tell kernel about new page table root. */
	return sys_vmctl_set_addrspace(who->vm_endpoint, pt->pt_dir_phys, pdes);
}
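/* Worked example (not built): with ARCH_BIG_PAGE_SIZE = 4MB and
 * VM_PAGE_SIZE = 4KB on i386, process slot 3 has its page directory
 * visible to the kernel at pagedir_pde*4MB + 3*4KB, i.e. each slot
 * gets one page-sized window inside the pagedir_pde region:
 */
#if 0
	int slot = 3;
	void *pdes = (void *) (pagedir_pde*ARCH_BIG_PAGE_SIZE
		+ slot*VM_PAGE_SIZE);
#endif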
/*===========================================================================*
 *				pt_free			     		     *
 *===========================================================================*/
void pt_free(pt_t *pt)
{
/* Free memory associated with this pagetable. */
	int i;

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++)
		if(pt->pt_pt[i])
			vm_freepages((vir_bytes) pt->pt_pt[i], 1);

	return;
}
/*===========================================================================*
 *				pt_mapkernel	     		     	     *
 *===========================================================================*/
int pt_mapkernel(pt_t *pt)
{
	int i;
	int kern_pde = kern_start_pde;
	phys_bytes addr, mapped = 0;

	/* Any page table needs to map in the kernel address space. */
	assert(bigpage_ok);
	assert(pagedir_pde >= 0);
	assert(kern_pde >= 0);

	/* pt_init() has made sure this is ok. */
	addr = kern_mb_mod->mod_start;

	/* Actually mapping in kernel */
	while(mapped < kern_size) {
#if defined(__i386__)
		pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
			ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
#elif defined(__arm__)
		pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK) |
			ARM_VM_SECTION |
			ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
			ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
#endif
		kern_pde++;
		mapped += ARCH_BIG_PAGE_SIZE;
		addr += ARCH_BIG_PAGE_SIZE;
	}

	/* Kernel also wants to know about all page directories. */
	assert(pagedir_pde > kern_pde);
	pt->pt_dir[pagedir_pde] = pagedir_pde_val;

	/* Kernel also wants various mappings of its own. */
	for(i = 0; i < kernmappings; i++) {
		int r;
		if((r=pt_writemap(NULL, pt,
			kern_mappings[i].vir_addr,
			kern_mappings[i].phys_addr,
			kern_mappings[i].len,
			kern_mappings[i].flags, 0)) != OK) {
			return r;
		}
	}

	return OK;
}
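/* Worked example (not built): the kernel is mapped with big pages only,
 * so a kernel of kern_size bytes occupies ceil(kern_size /
 * ARCH_BIG_PAGE_SIZE) consecutive PDEs from kern_start_pde; on i386, a
 * 2.5MB kernel takes one 4MB slot:
 */
#if 0
	int kern_pdes = (kern_size + ARCH_BIG_PAGE_SIZE - 1)
		/ ARCH_BIG_PAGE_SIZE;
#endif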
/*===========================================================================*
 *				pt_cycle		     		     *
 *===========================================================================*/
void pt_cycle(void)
{
	vm_checkspares();
#if defined(__arm__)
	vm_checksparedirs();
#endif
}

int get_vm_self_pages(void) { return vm_self_pages; }