4 #include <minix/callnr.h>
6 #include <minix/config.h>
7 #include <minix/const.h>
9 #include <minix/endpoint.h>
10 #include <minix/minlib.h>
11 #include <minix/type.h>
12 #include <minix/ipc.h>
13 #include <minix/sysutil.h>
14 #include <minix/syslib.h>
15 #include <minix/safecopies.h>
16 #include <minix/cpufeature.h>
17 #include <minix/bitmap.h>
18 #include <minix/debug.h>
32 #include "sanitycheck.h"
34 static int vm_self_pages
;
36 /* PDE used to map in kernel, kernel physical address. */
37 #define MAX_PAGEDIR_PDES 5
42 u32_t
*page_directories
;
43 } pagedir_mappings
[MAX_PAGEDIR_PDES
];
45 static multiboot_module_t
*kern_mb_mod
= NULL
;
46 static size_t kern_size
= 0;
47 static int kern_start_pde
= -1;
49 /* big page size available in hardware? */
50 static int bigpage_ok
= 1;
52 /* Our process table entry. */
53 struct vmproc
*vmprocess
= &vmproc
[VM_PROC_NR
];
55 /* Spare memory, ready to go after initialization, to avoid a
56 * circular dependency on allocating memory and writing it into VM's
60 #define SPAREPAGES 200
61 #define STATIC_SPAREPAGES 190
64 # define SPAREPAGES 150
65 # define STATIC_SPAREPAGES 140
67 # define SPAREPAGES 20
68 # define STATIC_SPAREPAGES 15
73 static u32_t global_bit
= 0;
76 #define SPAREPAGEDIRS 1
77 #define STATIC_SPAREPAGEDIRS 1
79 int missing_sparedirs
= SPAREPAGEDIRS
;
83 } sparepagedirs
[SPAREPAGEDIRS
];
85 #define is_staticaddr(v) ((vir_bytes) (v) < VM_OWN_HEAPSTART)
87 #define MAX_KERNMAPPINGS 10
89 phys_bytes phys_addr
; /* Physical addr. */
90 phys_bytes len
; /* Length in bytes. */
91 vir_bytes vir_addr
; /* Offset in page table. */
93 } kern_mappings
[MAX_KERNMAPPINGS
];
96 /* Clicks must be pages, as
97 * - they must be page aligned to map them
98 * - they must be a multiple of the page size
99 * - it's inconvenient to have them bigger than pages, because we often want
101 * May as well require them to be equal then.
103 #if CLICK_SIZE != VM_PAGE_SIZE
104 #error CLICK_SIZE must be page size.
107 static void *spare_pagequeue
;
108 static char static_sparepages
[VM_PAGE_SIZE
*STATIC_SPAREPAGES
]
109 __aligned(VM_PAGE_SIZE
);
112 static char static_sparepagedirs
[ARCH_PAGEDIR_SIZE
*STATIC_SPAREPAGEDIRS
+ ARCH_PAGEDIR_SIZE
] __aligned(ARCH_PAGEDIR_SIZE
);
/*
 * pt_assert - verify that the hardware-visible page directory matches
 * VM's in-memory copy: flush the TLB via sys_vmctl, physically copy
 * pt->pt_dir_phys into a local buffer, and assert it equals pt->pt_dir.
 * NOTE(review): the local buffer `dir` is declared on lines missing from
 * this extraction — confirm against the complete source file.
 */
115 void pt_assert(pt_t
*pt
)
119 if((sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
120 panic("VMCTL_FLUSHTLB failed");
/* Copy the directory from physical memory so we compare what the MMU sees. */
122 sys_physcopy(NONE
, pt
->pt_dir_phys
, SELF
, (vir_bytes
) dir
, sizeof(dir
), 0);
123 assert(!memcmp(dir
, pt
->pt_dir
, sizeof(dir
)));
127 /*===========================================================================*
129 *===========================================================================*/
/*
 * pt_sanitycheck - basic consistency check on a page table: the
 * directory pointer and its physical address must be set, the pt must
 * belong to some slot of the vmproc[] table, and its directory page
 * must register cleanly with usedpages_add().
 * (file/line identify the caller for MYASSERT diagnostics.)
 */
130 void pt_sanitycheck(pt_t
*pt
, const char *file
, int line
)
132 /* Basic pt sanity check. */
136 MYASSERT(pt
->pt_dir
);
137 MYASSERT(pt
->pt_dir_phys
);
/* Find which process slot this page table belongs to. */
139 for(slot
= 0; slot
< ELEMENTS(vmproc
); slot
++) {
140 if(pt
== &vmproc
[slot
].vm_pt
)
144 if(slot
>= ELEMENTS(vmproc
)) {
145 panic("pt_sanitycheck: passed pt not in any proc");
148 MYASSERT(usedpages_add(pt
->pt_dir_phys
, VM_PAGE_SIZE
) == OK
);
152 /*===========================================================================*
154 *===========================================================================*/
/*
 * findhole - scan VM's own virtual address space (VM_OWN_MMAPBASE up to
 * VM_OWN_MMAPTOP) for a run of `pages` consecutive unmapped pages.
 * Resumes from the position last handed out (static `lastv`) and, once
 * the top is reached, restarts once from the bottom. Returns the hole's
 * address or NO_MEM when the address space is exhausted.
 * NOTE(review): several declarations and branch bodies (`curv`, `pte`,
 * `holesize`, restart logic) fall on lines missing from this extraction.
 */
155 static u32_t
findhole(int pages
)
157 /* Find a space in the virtual address space of VM. */
159 int pde
= 0, try_restart
;
160 static void *lastv
= 0;
161 pt_t
*pt
= &vmprocess
->vm_pt
;
162 vir_bytes vmin
, vmax
;
163 u32_t holev
= NO_MEM
;
166 vmin
= VM_OWN_MMAPBASE
;
167 vmax
= VM_OWN_MMAPTOP
;
169 /* Input sanity check. */
170 assert(vmin
+ VM_PAGE_SIZE
>= vmin
);
171 assert(vmax
>= vmin
+ VM_PAGE_SIZE
);
172 assert((vmin
% VM_PAGE_SIZE
) == 0);
173 assert((vmax
% VM_PAGE_SIZE
) == 0);
/* Resume where the previous search left off, if still in range. */
176 curv
= (u32_t
) lastv
;
177 if(curv
< vmin
|| curv
>= vmax
)
182 /* Start looking for a free page starting at vmin. */
186 assert(curv
>= vmin
);
189 pde
= ARCH_VM_PDE(curv
);
190 pte
= ARCH_VM_PTE(curv
);
192 if((pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
) &&
193 (pt
->pt_pt
[pde
][pte
] & ARCH_VM_PTE_PRESENT
)) {
194 /* there is a page here - so keep looking for holes */
198 /* there is no page here - so we have a hole, a bigger
199 * one if we already had one
201 if(holev
== NO_MEM
) {
206 assert(holesize
> 0);
207 assert(holesize
<= pages
);
209 /* if it's big enough, return it */
210 if(holesize
== pages
) {
211 lastv
= (void*) (curv
+ VM_PAGE_SIZE
);
218 /* if we reached the limit, start scanning from the beginning if
219 * we haven't looked there yet
221 if(curv
>= vmax
&& try_restart
) {
227 printf("VM: out of virtual address space in vm\n");
232 /*===========================================================================*
234 *===========================================================================*/
/*
 * vm_freepages - unmap, and free via WMF_FREE, `pages` pages of VM's
 * own address space starting at page-aligned `vir`. Static (boot-image)
 * addresses are never freed. With SANITYCHECKS enabled, the TLB is also
 * flushed so that accesses to freed pages always trap.
 */
235 void vm_freepages(vir_bytes vir
, int pages
)
237 assert(!(vir
% VM_PAGE_SIZE
));
239 if(is_staticaddr(vir
)) {
240 printf("VM: not freeing static page\n");
/* Overwrite the range with MAP_NONE and release the physical pages. */
244 if(pt_writemap(vmprocess
, &vmprocess
->vm_pt
, vir
,
245 MAP_NONE
, pages
*VM_PAGE_SIZE
, 0,
246 WMF_OVERWRITE
| WMF_FREE
) != OK
)
247 panic("vm_freepages: pt_writemap failed");
252 /* If SANITYCHECKS are on, flush tlb so accessing freed pages is
253 * always trapped, also if not in tlb.
255 if((sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
256 panic("VMCTL_FLUSHTLB failed");
261 /*===========================================================================*
263 *===========================================================================*/
/*
 * vm_getsparepage - take one pre-reserved spare page from
 * spare_pagequeue, storing its physical address in *phys and returning
 * its virtual address. NOTE(review): the success-return path lies on
 * lines missing from this extraction.
 */
264 static void *vm_getsparepage(phys_bytes
*phys
)
267 if(reservedqueue_alloc(spare_pagequeue
, phys
, &ptr
) != OK
) {
274 /*===========================================================================*
275 * vm_getsparepagedir *
276 *===========================================================================*/
/*
 * vm_getsparepagedir - hand out one spare page directory from the
 * sparepagedirs[] pool: return its virtual address, store the physical
 * address in *phys, and mark the slot empty. The bookkeeping counter
 * missing_sparedirs is asserted to stay within [0, SPAREPAGEDIRS]
 * before and after (its increment lies on a missing line).
 */
277 static void *vm_getsparepagedir(phys_bytes
*phys
)
280 assert(missing_sparedirs
>= 0 && missing_sparedirs
<= SPAREPAGEDIRS
);
281 for(s
= 0; s
< SPAREPAGEDIRS
; s
++) {
282 if(sparepagedirs
[s
].pagedir
) {
284 sp
= sparepagedirs
[s
].pagedir
;
285 *phys
= sparepagedirs
[s
].phys
;
/* Consume the slot so it is not handed out twice. */
286 sparepagedirs
[s
].pagedir
= NULL
;
288 assert(missing_sparedirs
>= 0 && missing_sparedirs
<= SPAREPAGEDIRS
);
/*
 * vm_mappages - map `pages` pages of physical memory at `p` into VM's
 * own address space: find a hole with findhole(), write the mapping as
 * present/user/writable via pt_writemap(), flush the TLB, and return
 * the chosen virtual address (return statements on missing lines).
 */
295 void *vm_mappages(phys_bytes p
, int pages
)
299 pt_t
*pt
= &vmprocess
->vm_pt
;
301 /* Where in our virtual address space can we put it? */
302 loc
= findhole(pages
);
304 printf("vm_mappages: findhole failed\n");
308 /* Map this page into our address space. */
309 if((r
=pt_writemap(vmprocess
, pt
, loc
, p
, VM_PAGE_SIZE
*pages
,
310 ARCH_VM_PTE_PRESENT
| ARCH_VM_PTE_USER
| ARCH_VM_PTE_RW
315 printf("vm_mappages writemap failed\n");
/* Make the new mapping visible to ourselves immediately. */
319 if((r
=sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
320 panic("VMCTL_FLUSHTLB failed: %d", r
);
328 static int pt_init_done
;
330 /*===========================================================================*
332 *===========================================================================*/
/*
 * vm_allocpages - allocate `pages` pages for VM's own use; `reason` is
 * a VMP_* accounting category. During bootstrap (!pt_init_done) or on
 * recursive entry (level > 1), memory comes from the static spare
 * pools (1 page or a 4-page directory); otherwise alloc_mem() supplies
 * it and vm_mappages() maps it in. Page directories get 16K alignment.
 */
333 void *vm_allocpages(phys_bytes
*phys
, int reason
, int pages
)
335 /* Allocate a page for use by VM itself. */
337 static int level
= 0;
341 assert(reason
>= 0 && reason
< VMP_CATEGORIES
);
/* Bootstrap / re-entrant path: use pre-reserved spare memory. */
350 if((level
> 1) || !pt_init_done
) {
353 if(pages
== 1) s
=vm_getsparepage(phys
);
354 else if(pages
== 4) s
=vm_getsparepagedir(phys
);
355 else panic("%d pages", pages
);
360 printf("VM: warning: out of spare pages\n");
362 if(!is_staticaddr(s
)) vm_self_pages
++;
367 if (reason
== VMP_PAGEDIR
) {
368 mem_flags
|= PAF_ALIGN16K
;
372 /* Allocate page of memory for use by VM. As VM
373 * is trusted, we don't have to pre-clear it.
375 if((newpage
= alloc_mem(pages
, mem_flags
)) == NO_MEM
) {
377 printf("VM: vm_allocpage: alloc_mem failed\n");
381 *phys
= CLICK2ABS(newpage
);
383 if(!(ret
= vm_mappages(*phys
, pages
))) {
385 printf("VM: vm_allocpage: vm_mappages failed\n");
/*
 * vm_allocpage - convenience wrapper: allocate a single page for VM's
 * own use via vm_allocpages().
 */
395 void *vm_allocpage(phys_bytes
*phys
, int reason
)
397 return vm_allocpages(phys
, reason
, 1);
400 /*===========================================================================*
402 *===========================================================================*/
/*
 * vm_pagelock - toggle write permission on one of VM's own pages (as
 * returned by vm_allocpage()): rewrite only the PTE flags
 * (WMF_WRITEFLAGSONLY) and flush the TLB. `lockflag` selects read-only
 * versus read-write; the branch structure around the flag choices lies
 * partly on lines missing from this extraction.
 */
403 void vm_pagelock(void *vir
, int lockflag
)
405 /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
406 vir_bytes m
= (vir_bytes
) vir
;
408 u32_t flags
= ARCH_VM_PTE_PRESENT
| ARCH_VM_PTE_USER
;
411 pt
= &vmprocess
->vm_pt
;
413 assert(!(m
% VM_PAGE_SIZE
));
416 flags
|= ARCH_VM_PTE_RW
;
419 flags
|= ARCH_VM_PTE_RO
;
421 flags
|= ARM_VM_PTE_CACHED
;
/* Update flags only; the physical mapping itself is unchanged. */
425 if((r
=pt_writemap(vmprocess
, pt
, m
, 0, VM_PAGE_SIZE
,
426 flags
, WMF_OVERWRITE
| WMF_WRITEFLAGSONLY
)) != OK
) {
427 panic("vm_lockpage: pt_writemap failed");
430 if((r
=sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
431 panic("VMCTL_FLUSHTLB failed: %d", r
);
437 /*===========================================================================*
439 *===========================================================================*/
/*
 * vm_addrok - check whether virtual address `vir` is mapped in VM's own
 * page table, and mapped writable when `writeflag` is set. Prints a
 * diagnostic on failure; the return statements fall on lines missing
 * from this extraction. Write checks are per-arch: i386 requires
 * ARCH_VM_PTE_RW set, ARM requires ARCH_VM_PTE_RO clear.
 */
440 int vm_addrok(void *vir
, int writeflag
)
442 pt_t
*pt
= &vmprocess
->vm_pt
;
444 vir_bytes v
= (vir_bytes
) vir
;
446 pde
= ARCH_VM_PDE(v
);
447 pte
= ARCH_VM_PTE(v
);
/* Directory entry must be present before the page table is consulted. */
449 if(!(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
)) {
450 printf("addr not ok: missing pde %d\n", pde
);
454 #if defined(__i386__)
456 !(pt
->pt_dir
[pde
] & ARCH_VM_PTE_RW
)) {
457 printf("addr not ok: pde %d present but pde unwritable\n", pde
);
460 #elif defined(__arm__)
462 (pt
->pt_dir
[pde
] & ARCH_VM_PTE_RO
)) {
463 printf("addr not ok: pde %d present but pde unwritable\n", pde
);
468 if(!(pt
->pt_pt
[pde
][pte
] & ARCH_VM_PTE_PRESENT
)) {
469 printf("addr not ok: missing pde %d / pte %d\n",
474 #if defined(__i386__)
476 !(pt
->pt_pt
[pde
][pte
] & ARCH_VM_PTE_RW
)) {
477 printf("addr not ok: pde %d / pte %d present but unwritable\n",
479 #elif defined(__arm__)
481 (pt
->pt_pt
[pde
][pte
] & ARCH_VM_PTE_RO
)) {
482 printf("addr not ok: pde %d / pte %d present but unwritable\n",
491 /*===========================================================================*
493 *===========================================================================*/
/*
 * pt_ptalloc - allocate a page table for directory slot `pde` of `pt`,
 * clear its entries, and install its physical address in the page
 * directory. The PDE is made present/writable/user-accessible (i386)
 * or present with a domain (ARM), leaving protection to the PTEs.
 * If vm_allocpage() recursively created the entry as a side effect,
 * the freshly allocated page is released again and nothing else is done.
 */
494 static int pt_ptalloc(pt_t
*pt
, int pde
, u32_t flags
)
496 /* Allocate a page table and write its address into the page directory. */
501 /* Argument must make sense. */
502 assert(pde
>= 0 && pde
< ARCH_VM_DIR_ENTRIES
);
503 assert(!(flags
& ~(PTF_ALLFLAGS
)));
505 /* We don't expect to overwrite page directory entry, nor
506 * storage for the page table.
508 assert(!(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
));
509 assert(!pt
->pt_pt
[pde
]);
511 /* Get storage for the page table. The allocation call may in fact
512 * recursively create the directory entry as a side effect. In that
513 * case, we free the newly allocated page and do nothing else.
515 if (!(p
= vm_allocpage(&pt_phys
, VMP_PAGETABLE
)))
517 if (pt
->pt_pt
[pde
]) {
518 vm_freepages((vir_bytes
) p
, 1);
519 assert(pt
->pt_pt
[pde
]);
524 for(i
= 0; i
< ARCH_VM_PT_ENTRIES
; i
++)
525 pt
->pt_pt
[pde
][i
] = 0; /* Empty entry. */
527 /* Make page directory entry.
528 * The PDE is always 'present,' 'writable,' and 'user accessible,'
529 * relying on the PTE for protection.
531 #if defined(__i386__)
532 pt
->pt_dir
[pde
] = (pt_phys
& ARCH_VM_ADDR_MASK
) | flags
533 | ARCH_VM_PDE_PRESENT
| ARCH_VM_PTE_USER
| ARCH_VM_PTE_RW
;
534 #elif defined(__arm__)
535 pt
->pt_dir
[pde
] = (pt_phys
& ARCH_VM_PDE_MASK
)
536 | ARCH_VM_PDE_PRESENT
| ARM_VM_PDE_DOMAIN
; //LSC FIXME
542 /*===========================================================================*
543 * pt_ptalloc_in_range *
544 *===========================================================================*/
/*
 * pt_ptalloc_in_range - ensure page tables exist for every directory
 * entry covering [start, end): allocate missing ones with pt_ptalloc().
 * In verify mode (branching partly on missing lines) a missing pde is
 * reported instead of allocated. On allocation failure, previously
 * allocated tables are deliberately left in place — they stay consistent.
 */
545 int pt_ptalloc_in_range(pt_t
*pt
, vir_bytes start
, vir_bytes end
,
546 u32_t flags
, int verify
)
548 /* Allocate all the page tables in the range specified. */
549 int pde
, first_pde
, last_pde
;
551 first_pde
= ARCH_VM_PDE(start
);
552 last_pde
= ARCH_VM_PDE(end
-1);
554 assert(first_pde
>= 0);
555 assert(last_pde
< ARCH_VM_DIR_ENTRIES
);
557 /* Scan all page-directory entries in the range. */
558 for(pde
= first_pde
; pde
<= last_pde
; pde
++) {
559 assert(!(pt
->pt_dir
[pde
] & ARCH_VM_BIGPAGE
));
560 if(!(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
)) {
563 printf("pt_ptalloc_in_range: no pde %d\n", pde
);
566 assert(!pt
->pt_dir
[pde
]);
567 if((r
=pt_ptalloc(pt
, pde
, flags
)) != OK
) {
568 /* Couldn't do (complete) mapping.
569 * Don't bother freeing any previously
570 * allocated page tables, they're
571 * still writable, don't point to nonsense,
572 * and pt_ptalloc leaves the directory
573 * and other data in a consistent state.
577 assert(pt
->pt_pt
[pde
]);
579 assert(pt
->pt_pt
[pde
]);
580 assert(pt
->pt_dir
[pde
]);
581 assert(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
);
/*
 * ptestr - render a page-table entry's flag bits as a short
 * human-readable string for pt_writemap()'s verification diagnostics.
 * NOTE(review): the backing buffer `str` is declared on lines missing
 * from this extraction; presumably static, so not reentrant — confirm.
 */
587 static const char *ptestr(u32_t pte
)
589 #define FLAG(constant, name) { \
590 if(pte & (constant)) { strcat(str, name); strcat(str, " "); } \
594 if(!(pte
& ARCH_VM_PTE_PRESENT
)) {
595 return "not present";
598 #if defined(__i386__)
599 FLAG(ARCH_VM_PTE_RW
, "W");
600 #elif defined(__arm__)
601 if(pte
& ARCH_VM_PTE_RO
) {
607 FLAG(ARCH_VM_PTE_USER
, "U");
608 #if defined(__i386__)
609 FLAG(I386_VM_PWT
, "PWT");
610 FLAG(I386_VM_PCD
, "PCD");
611 FLAG(I386_VM_ACC
, "ACC");
612 FLAG(I386_VM_DIRTY
, "DIRTY");
613 FLAG(I386_VM_PS
, "PS");
614 FLAG(I386_VM_GLOBAL
, "G");
615 FLAG(I386_VM_PTAVAIL1
, "AV1");
616 FLAG(I386_VM_PTAVAIL2
, "AV2");
617 FLAG(I386_VM_PTAVAIL3
, "AV3");
618 #elif defined(__arm__)
619 FLAG(ARM_VM_PTE_SUPER
, "S");
620 FLAG(ARM_VM_PTE_S
, "SH");
621 FLAG(ARM_VM_PTE_WB
, "WB");
622 FLAG(ARM_VM_PTE_WT
, "WT");
628 /*===========================================================================*
630 *===========================================================================*/
/*
 * pt_map_in_range - copy all present page-table entries covering
 * [start, end] (end == 0 means up to VM_DATATOP) from src_vmp's page
 * table into dst_vmp's, entry by entry. Destination page tables must
 * already exist (asserted); absent source pages are skipped.
 */
631 int pt_map_in_range(struct vmproc
*src_vmp
, struct vmproc
*dst_vmp
,
632 vir_bytes start
, vir_bytes end
)
634 /* Transfer all the mappings from the pt of the source process to the pt of
635 * the destination process in the range specified.
641 pt
= &src_vmp
->vm_pt
;
642 dst_pt
= &dst_vmp
->vm_pt
;
644 end
= end
? end
: VM_DATATOP
;
645 assert(start
% VM_PAGE_SIZE
== 0);
646 assert(end
% VM_PAGE_SIZE
== 0);
648 assert( /* ARCH_VM_PDE(start) >= 0 && */ start
<= end
);
649 assert(ARCH_VM_PDE(end
) < ARCH_VM_DIR_ENTRIES
);
652 printf("VM: pt_map_in_range: src = %d, dst = %d\n",
653 src_vmp
->vm_endpoint
, dst_vmp
->vm_endpoint
);
654 printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
655 start
, ARCH_VM_PDE(start
), ARCH_VM_PTE(start
),
656 end
, ARCH_VM_PDE(end
), ARCH_VM_PTE(end
));
659 /* Scan all page-table entries in the range. */
660 for(viraddr
= start
; viraddr
<= end
; viraddr
+= VM_PAGE_SIZE
) {
661 pde
= ARCH_VM_PDE(viraddr
);
662 if(!(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
)) {
663 if(viraddr
== VM_DATATOP
) break;
666 pte
= ARCH_VM_PTE(viraddr
);
667 if(!(pt
->pt_pt
[pde
][pte
] & ARCH_VM_PTE_PRESENT
)) {
668 if(viraddr
== VM_DATATOP
) break;
672 /* Transfer the mapping. */
673 dst_pt
->pt_pt
[pde
][pte
] = pt
->pt_pt
[pde
][pte
];
674 assert(dst_pt
->pt_pt
[pde
]);
676 if(viraddr
== VM_DATATOP
) break;
682 /*===========================================================================*
684 *===========================================================================*/
/*
 * pt_ptmap - make src_vmp's page directory and all of its non-kernel
 * page tables visible inside dst_vmp's address space by mapping their
 * physical pages at the same virtual addresses via pt_writemap().
 * Kernel pdes (>= kern_start_pde) are skipped. On ARM the directory is
 * ARCH_PAGEDIR_SIZE; on i386 it is a single page.
 */
685 int pt_ptmap(struct vmproc
*src_vmp
, struct vmproc
*dst_vmp
)
687 /* Transfer mappings to page dir and page tables from source process and
688 * destination process.
695 pt
= &src_vmp
->vm_pt
;
698 printf("VM: pt_ptmap: src = %d, dst = %d\n",
699 src_vmp
->vm_endpoint
, dst_vmp
->vm_endpoint
);
702 /* Transfer mapping to the page directory. */
703 viraddr
= (vir_bytes
) pt
->pt_dir
;
704 physaddr
= pt
->pt_dir_phys
& ARCH_VM_ADDR_MASK
;
705 #if defined(__i386__)
706 if((r
=pt_writemap(dst_vmp
, &dst_vmp
->vm_pt
, viraddr
, physaddr
, VM_PAGE_SIZE
,
707 ARCH_VM_PTE_PRESENT
| ARCH_VM_PTE_USER
| ARCH_VM_PTE_RW
,
708 #elif defined(__arm__)
709 if((r
=pt_writemap(dst_vmp
, &dst_vmp
->vm_pt
, viraddr
, physaddr
, ARCH_PAGEDIR_SIZE
,
710 ARCH_VM_PTE_PRESENT
| ARCH_VM_PTE_USER
|
713 WMF_OVERWRITE
)) != OK
) {
717 printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
721 /* Scan all non-reserved page-directory entries. */
722 for(pde
=0; pde
< kern_start_pde
; pde
++) {
723 if(!(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
)) {
727 if(!pt
->pt_pt
[pde
]) { panic("pde %d empty\n", pde
); }
729 /* Transfer mapping to the page table. */
730 viraddr
= (vir_bytes
) pt
->pt_pt
[pde
];
731 #if defined(__i386__)
732 physaddr
= pt
->pt_dir
[pde
] & ARCH_VM_ADDR_MASK
;
733 #elif defined(__arm__)
734 physaddr
= pt
->pt_dir
[pde
] & ARCH_VM_PDE_MASK
;
737 if((r
=pt_writemap(dst_vmp
, &dst_vmp
->vm_pt
, viraddr
, physaddr
, VM_PAGE_SIZE
,
738 ARCH_VM_PTE_PRESENT
| ARCH_VM_PTE_USER
| ARCH_VM_PTE_RW
743 WMF_OVERWRITE
)) != OK
) {
/*
 * pt_clearmapcache - ask the kernel to invalidate its mapping cache so
 * that new mappings made through VM's current page table take effect
 * before the next page-directory (cr3) reload.
 */
751 void pt_clearmapcache(void)
753 /* Make sure kernel will invalidate tlb when using current
754 * pagetable (i.e. vm's) to make new mappings before new cr3
757 if(sys_vmctl(SELF
, VMCTL_CLEARMAPCACHE
, 0) != OK
)
758 panic("VMCTL_CLEARMAPCACHE failed");
/*
 * pt_writable - return 1 iff page-aligned address `v` is mapped
 * writable in vmp's page table: PTF_WRITE set on i386, ARCH_VM_PTE_RO
 * clear on ARM. Both the directory entry and its page table must be
 * present (asserted).
 */
761 int pt_writable(struct vmproc
*vmp
, vir_bytes v
)
764 pt_t
*pt
= &vmp
->vm_pt
;
765 assert(!(v
% VM_PAGE_SIZE
));
766 int pde
= ARCH_VM_PDE(v
);
767 int pte
= ARCH_VM_PTE(v
);
769 assert(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
);
770 assert(pt
->pt_pt
[pde
]);
772 entry
= pt
->pt_pt
[pde
][pte
];
774 #if defined(__i386__)
775 return((entry
& PTF_WRITE
) ? 1 : 0);
776 #elif defined(__arm__)
777 return((entry
& ARCH_VM_PTE_RO
) ? 0 : 1);
781 /*===========================================================================*
783 *===========================================================================*/
784 int pt_writemap(struct vmproc
* vmp
,
792 /* Write mapping into page table. Allocate a new page table if necessary. */
793 /* Page directory and table entries for this virtual address. */
799 int vminhibit_clear
= 0;
801 * don't do it everytime, stop the process only on the first change and
802 * resume the execution on the last change. Do in a wrapper of this
805 if (vmp
&& vmp
->vm_endpoint
!= NONE
&& vmp
->vm_endpoint
!= VM_PROC_NR
&&
806 !(vmp
->vm_flags
& VMF_EXITING
)) {
807 sys_vmctl(vmp
->vm_endpoint
, VMCTL_VMINHIBIT_SET
, 0);
812 if(writemapflags
& WMF_VERIFY
)
815 assert(!(bytes
% VM_PAGE_SIZE
));
816 assert(!(flags
& ~(PTF_ALLFLAGS
)));
818 pages
= bytes
/ VM_PAGE_SIZE
;
820 /* MAP_NONE means to clear the mapping. It doesn't matter
821 * what's actually written into the PTE if PRESENT
822 * isn't on, so we can just write MAP_NONE into it.
824 assert(physaddr
== MAP_NONE
|| (flags
& ARCH_VM_PTE_PRESENT
));
825 assert(physaddr
!= MAP_NONE
|| !flags
);
827 /* First make sure all the necessary page tables are allocated,
828 * before we start writing in any of them, because it's a pain
829 * to undo our work properly.
831 ret
= pt_ptalloc_in_range(pt
, v
, v
+ VM_PAGE_SIZE
*pages
, flags
, verify
);
833 printf("VM: writemap: pt_ptalloc_in_range failed\n");
837 /* Now write in them. */
838 for(p
= 0; p
< pages
; p
++) {
840 int pde
= ARCH_VM_PDE(v
);
841 int pte
= ARCH_VM_PTE(v
);
843 assert(!(v
% VM_PAGE_SIZE
));
844 assert(pte
>= 0 && pte
< ARCH_VM_PT_ENTRIES
);
845 assert(pde
>= 0 && pde
< ARCH_VM_DIR_ENTRIES
);
847 /* Page table has to be there. */
848 assert(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
);
850 /* We do not expect it to be a bigpage. */
851 assert(!(pt
->pt_dir
[pde
] & ARCH_VM_BIGPAGE
));
853 /* Make sure page directory entry for this page table
854 * is marked present and page table entry is available.
856 assert(pt
->pt_pt
[pde
]);
858 if(writemapflags
& (WMF_WRITEFLAGSONLY
|WMF_FREE
)) {
859 #if defined(__i386__)
860 physaddr
= pt
->pt_pt
[pde
][pte
] & ARCH_VM_ADDR_MASK
;
861 #elif defined(__arm__)
862 physaddr
= pt
->pt_pt
[pde
][pte
] & ARM_VM_PTE_MASK
;
866 if(writemapflags
& WMF_FREE
) {
867 free_mem(ABS2CLICK(physaddr
), 1);
870 /* Entry we will write. */
871 #if defined(__i386__)
872 entry
= (physaddr
& ARCH_VM_ADDR_MASK
) | flags
;
873 #elif defined(__arm__)
874 entry
= (physaddr
& ARM_VM_PTE_MASK
) | flags
;
879 maskedentry
= pt
->pt_pt
[pde
][pte
];
880 #if defined(__i386__)
881 maskedentry
&= ~(I386_VM_ACC
|I386_VM_DIRTY
);
883 /* Verify pagetable entry. */
884 #if defined(__i386__)
885 if(entry
& ARCH_VM_PTE_RW
) {
886 /* If we expect a writable page, allow a readonly page. */
887 maskedentry
|= ARCH_VM_PTE_RW
;
889 #elif defined(__arm__)
890 if(!(entry
& ARCH_VM_PTE_RO
)) {
891 /* If we expect a writable page, allow a readonly page. */
892 maskedentry
&= ~ARCH_VM_PTE_RO
;
894 maskedentry
&= ~(ARM_VM_PTE_WB
|ARM_VM_PTE_WT
);
896 if(maskedentry
!= entry
) {
897 printf("pt_writemap: mismatch: ");
898 #if defined(__i386__)
899 if((entry
& ARCH_VM_ADDR_MASK
) !=
900 (maskedentry
& ARCH_VM_ADDR_MASK
)) {
901 #elif defined(__arm__)
902 if((entry
& ARM_VM_PTE_MASK
) !=
903 (maskedentry
& ARM_VM_PTE_MASK
)) {
905 printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
906 (long)entry
, (long)maskedentry
);
907 } else printf("phys ok; ");
908 printf(" flags: found %s; ",
909 ptestr(pt
->pt_pt
[pde
][pte
]));
910 printf(" masked %s; ",
911 ptestr(maskedentry
));
912 printf(" expected %s\n", ptestr(entry
));
913 printf("found 0x%x, wanted 0x%x\n",
914 pt
->pt_pt
[pde
][pte
], entry
);
919 /* Write pagetable entry. */
920 pt
->pt_pt
[pde
][pte
] = entry
;
923 physaddr
+= VM_PAGE_SIZE
;
930 if (vminhibit_clear
) {
931 assert(vmp
&& vmp
->vm_endpoint
!= NONE
&& vmp
->vm_endpoint
!= VM_PROC_NR
&&
932 !(vmp
->vm_flags
& VMF_EXITING
));
933 sys_vmctl(vmp
->vm_endpoint
, VMCTL_VMINHIBIT_CLEAR
, 0);
940 /*===========================================================================*
942 *===========================================================================*/
943 int pt_checkrange(pt_t
*pt
, vir_bytes v
, size_t bytes
,
948 assert(!(bytes
% VM_PAGE_SIZE
));
950 pages
= bytes
/ VM_PAGE_SIZE
;
952 for(p
= 0; p
< pages
; p
++) {
953 int pde
= ARCH_VM_PDE(v
);
954 int pte
= ARCH_VM_PTE(v
);
956 assert(!(v
% VM_PAGE_SIZE
));
957 assert(pte
>= 0 && pte
< ARCH_VM_PT_ENTRIES
);
958 assert(pde
>= 0 && pde
< ARCH_VM_DIR_ENTRIES
);
960 /* Page table has to be there. */
961 if(!(pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
))
964 /* Make sure page directory entry for this page table
965 * is marked present and page table entry is available.
967 assert((pt
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
) && pt
->pt_pt
[pde
]);
969 if(!(pt
->pt_pt
[pde
][pte
] & ARCH_VM_PTE_PRESENT
)) {
973 #if defined(__i386__)
974 if(write
&& !(pt
->pt_pt
[pde
][pte
] & ARCH_VM_PTE_RW
)) {
975 #elif defined(__arm__)
976 if(write
&& (pt
->pt_pt
[pde
][pte
] & ARCH_VM_PTE_RO
)) {
987 /*===========================================================================*
989 *===========================================================================*/
992 /* Allocate a pagetable root. Allocate a page-aligned page directory
993 * and set them to 0 (indicating no page tables are allocated). Lookup
994 * its physical address as we'll need that in the future. Verify it's
999 /* Don't ever re-allocate/re-move a certain process slot's
1000 * page directory once it's been created. This is a fraction
1001 * faster, but also avoids having to invalidate the page
1002 * mappings from in-kernel page tables pointing to
1003 * the page directories (the page_directories data).
1006 !(pt
->pt_dir
= vm_allocpages((phys_bytes
*)&pt
->pt_dir_phys
,
1007 VMP_PAGEDIR
, ARCH_PAGEDIR_SIZE
/VM_PAGE_SIZE
))) {
1011 assert(!((u32_t
)pt
->pt_dir_phys
% ARCH_PAGEDIR_SIZE
));
1013 for(i
= 0; i
< ARCH_VM_DIR_ENTRIES
; i
++) {
1014 pt
->pt_dir
[i
] = 0; /* invalid entry (PRESENT bit = 0) */
1015 pt
->pt_pt
[i
] = NULL
;
1018 /* Where to start looking for free virtual address space? */
1021 /* Map in kernel. */
1022 if((r
=pt_mapkernel(pt
)) != OK
)
/*
 * freepde - hand out the next free page-directory slot reserved by the
 * kernel (kernel_boot_info.freepde_start), asserting the cursor stays
 * within the directory. The `return p;` lies on a missing line.
 */
1028 static int freepde(void)
1030 int p
= kernel_boot_info
.freepde_start
++;
1031 assert(kernel_boot_info
.freepde_start
< ARCH_VM_DIR_ENTRIES
);
/*
 * pt_allocate_kernel_mapped_pagetables - for each of the
 * MAX_PAGEDIR_PDES pagedir_mappings: reserve a pde via freepde() and
 * allocate + zero a page table used to remember all processes' page
 * directory pointers, precomputing the directory-entry value (`val`)
 * that will be plugged into every process' page directory.
 */
1035 void pt_allocate_kernel_mapped_pagetables(void)
1037 /* Reserve PDEs available for mapping in the page directories. */
1039 for(pd
= 0; pd
< MAX_PAGEDIR_PDES
; pd
++) {
1040 struct pdm
*pdm
= &pagedir_mappings
[pd
];
1042 pdm
->pdeno
= freepde();
1047 /* Allocate us a page table in which to
1048 * remember page directory pointers.
1050 if(!(pdm
->page_directories
=
1051 vm_allocpage(&ph
, VMP_PAGETABLE
))) {
1052 panic("no virt addr for vm mappings");
1054 memset(pdm
->page_directories
, 0, VM_PAGE_SIZE
);
1057 #if defined(__i386__)
1058 pdm
->val
= (ph
& ARCH_VM_ADDR_MASK
) |
1059 ARCH_VM_PDE_PRESENT
| ARCH_VM_PTE_RW
;
1060 #elif defined(__arm__)
1061 pdm
->val
= (ph
& ARCH_VM_PDE_MASK
)
1062 | ARCH_VM_PDE_PRESENT
1064 | ARM_VM_PDE_DOMAIN
; //LSC FIXME
/*
 * pt_copy - duplicate every non-kernel, non-bigpage page table of `src`
 * into freshly allocated tables of `dst` (pt_ptalloc + memcpy of all
 * ARCH_VM_PT_ENTRIES entries). Used by pt_init to rebuild VM's own
 * page table from dynamic-only allocations.
 */
1069 static void pt_copy(pt_t
*dst
, pt_t
*src
)
1072 for(pde
=0; pde
< kern_start_pde
; pde
++) {
1073 if(!(src
->pt_dir
[pde
] & ARCH_VM_PDE_PRESENT
)) {
1076 assert(!(src
->pt_dir
[pde
] & ARCH_VM_BIGPAGE
));
1077 if(!src
->pt_pt
[pde
]) { panic("pde %d empty\n", pde
); }
1078 if(pt_ptalloc(dst
, pde
, 0) != OK
)
1079 panic("pt_ptalloc failed");
1080 memcpy(dst
->pt_pt
[pde
], src
->pt_pt
[pde
],
1081 ARCH_VM_PT_ENTRIES
* sizeof(*dst
->pt_pt
[pde
]));
1085 /*===========================================================================*
1087 *===========================================================================*/
1090 pt_t
*newpt
, newpt_dyn
;
1093 vir_bytes sparepages_mem
;
1094 #if defined(__arm__)
1095 vir_bytes sparepagedirs_mem
;
1097 static u32_t currentpagedir
[ARCH_VM_DIR_ENTRIES
];
1098 int m
= kernel_boot_info
.kern_mod
;
1099 #if defined(__i386__)
1100 int global_bit_ok
= 0;
1101 u32_t mypdbr
; /* Page Directory Base Register (cr3) value */
1102 #elif defined(__arm__)
1106 /* Find what the physical location of the kernel is. */
1108 assert(m
< kernel_boot_info
.mods_with_kernel
);
1109 assert(kernel_boot_info
.mods_with_kernel
< MULTIBOOT_MAX_MODS
);
1110 kern_mb_mod
= &kernel_boot_info
.module_list
[m
];
1111 kern_size
= kern_mb_mod
->mod_end
- kern_mb_mod
->mod_start
;
1112 assert(!(kern_mb_mod
->mod_start
% ARCH_BIG_PAGE_SIZE
));
1113 assert(!(kernel_boot_info
.vir_kern_start
% ARCH_BIG_PAGE_SIZE
));
1114 kern_start_pde
= kernel_boot_info
.vir_kern_start
/ ARCH_BIG_PAGE_SIZE
;
1116 /* Get ourselves spare pages. */
1117 sparepages_mem
= (vir_bytes
) static_sparepages
;
1118 assert(!(sparepages_mem
% VM_PAGE_SIZE
));
1120 #if defined(__arm__)
1121 /* Get ourselves spare pagedirs. */
1122 sparepagedirs_mem
= (vir_bytes
) static_sparepagedirs
;
1123 assert(!(sparepagedirs_mem
% ARCH_PAGEDIR_SIZE
));
1126 /* Spare pages are used to allocate memory before VM has its own page
1127 * table that things (i.e. arbitrary physical memory) can be mapped into.
1128 * We get it by pre-allocating it in our bss (allocated and mapped in by
1129 * the kernel) in static_sparepages. We also need the physical addresses
1130 * though; we look them up now so they are ready for use.
1132 #if defined(__arm__)
1133 missing_sparedirs
= 0;
1134 assert(STATIC_SPAREPAGEDIRS
<= SPAREPAGEDIRS
);
1135 for(s
= 0; s
< SPAREPAGEDIRS
; s
++) {
1136 vir_bytes v
= (sparepagedirs_mem
+ s
*ARCH_PAGEDIR_SIZE
);;
1138 if((r
=sys_umap(SELF
, VM_D
, (vir_bytes
) v
,
1139 ARCH_PAGEDIR_SIZE
, &ph
)) != OK
)
1140 panic("pt_init: sys_umap failed: %d", r
);
1141 if(s
>= STATIC_SPAREPAGEDIRS
) {
1142 sparepagedirs
[s
].pagedir
= NULL
;
1143 missing_sparedirs
++;
1146 sparepagedirs
[s
].pagedir
= (void *) v
;
1147 sparepagedirs
[s
].phys
= ph
;
1151 if(!(spare_pagequeue
= reservedqueue_new(SPAREPAGES
, 1, 1, 0)))
1152 panic("reservedqueue_new for single pages failed");
1154 assert(STATIC_SPAREPAGES
< SPAREPAGES
);
1155 for(s
= 0; s
< STATIC_SPAREPAGES
; s
++) {
1156 void *v
= (void *) (sparepages_mem
+ s
*VM_PAGE_SIZE
);
1158 if((r
=sys_umap(SELF
, VM_D
, (vir_bytes
) v
,
1159 VM_PAGE_SIZE
*SPAREPAGES
, &ph
)) != OK
)
1160 panic("pt_init: sys_umap failed: %d", r
);
1161 reservedqueue_add(spare_pagequeue
, v
, ph
);
1164 #if defined(__i386__)
1165 /* global bit and 4MB pages available? */
1166 global_bit_ok
= _cpufeature(_CPUF_I386_PGE
);
1167 bigpage_ok
= _cpufeature(_CPUF_I386_PSE
);
1169 /* Set bit for PTE's and PDE's if available. */
1171 global_bit
= I386_VM_GLOBAL
;
1174 /* Now reserve another pde for kernel's own mappings. */
1177 phys_bytes addr
, len
;
1178 int flags
, pindex
= 0;
1181 kernmap_pde
= freepde();
1182 offset
= kernmap_pde
* ARCH_BIG_PAGE_SIZE
;
1184 while(sys_vmctl_get_mapping(pindex
, &addr
, &len
,
1188 if(pindex
>= MAX_KERNMAPPINGS
)
1189 panic("VM: too many kernel mappings: %d", pindex
);
1190 kern_mappings
[pindex
].phys_addr
= addr
;
1191 kern_mappings
[pindex
].len
= len
;
1192 kern_mappings
[pindex
].flags
= flags
;
1193 kern_mappings
[pindex
].vir_addr
= offset
;
1194 kern_mappings
[pindex
].flags
=
1195 ARCH_VM_PTE_PRESENT
;
1196 if(flags
& VMMF_UNCACHED
)
1197 #if defined(__i386__)
1198 kern_mappings
[pindex
].flags
|= PTF_NOCACHE
;
1199 #elif defined(__arm__)
1200 kern_mappings
[pindex
].flags
|= ARM_VM_PTE_DEVICE
;
1202 kern_mappings
[pindex
].flags
|= ARM_VM_PTE_CACHED
;
1205 if(flags
& VMMF_USER
)
1206 kern_mappings
[pindex
].flags
|= ARCH_VM_PTE_USER
;
1207 #if defined(__arm__)
1209 kern_mappings
[pindex
].flags
|= ARM_VM_PTE_SUPER
;
1211 if(flags
& VMMF_WRITE
)
1212 kern_mappings
[pindex
].flags
|= ARCH_VM_PTE_RW
;
1213 #if defined(__arm__)
1215 kern_mappings
[pindex
].flags
|= ARCH_VM_PTE_RO
;
1218 #if defined(__i386__)
1219 if(flags
& VMMF_GLO
)
1220 kern_mappings
[pindex
].flags
|= I386_VM_GLOBAL
;
1223 if(addr
% VM_PAGE_SIZE
)
1224 panic("VM: addr unaligned: %lu", addr
);
1225 if(len
% VM_PAGE_SIZE
)
1226 panic("VM: len unaligned: %lu", len
);
1228 if(sys_vmctl_reply_mapping(pindex
, vir
) != OK
)
1229 panic("VM: reply failed");
1234 usedpde
= ARCH_VM_PDE(offset
);
1235 while(usedpde
> kernmap_pde
) {
1236 int newpde
= freepde();
1237 assert(newpde
== kernmap_pde
+1);
1238 kernmap_pde
= newpde
;
1243 pt_allocate_kernel_mapped_pagetables();
1245 /* Allright. Now. We have to make our own page directory and page tables,
1246 * that the kernel has already set up, accessible to us. It's easier to
1247 * understand if we just copy all the required pages (i.e. page directory
1248 * and page tables), and set up the pointers as if VM had done it itself.
1250 * This allocation will happen without using any page table, and just
1253 newpt
= &vmprocess
->vm_pt
;
1254 if(pt_new(newpt
) != OK
)
1255 panic("vm pt_new failed");
1257 /* Get our current pagedir so we can see it. */
1258 #if defined(__i386__)
1259 if(sys_vmctl_get_pdbr(SELF
, &mypdbr
) != OK
)
1260 #elif defined(__arm__)
1261 if(sys_vmctl_get_pdbr(SELF
, &myttbr
) != OK
)
1264 panic("VM: sys_vmctl_get_pdbr failed");
1265 #if defined(__i386__)
1266 if(sys_vircopy(NONE
, mypdbr
, SELF
,
1267 (vir_bytes
) currentpagedir
, VM_PAGE_SIZE
, 0) != OK
)
1268 #elif defined(__arm__)
1269 if(sys_vircopy(NONE
, myttbr
, SELF
,
1270 (vir_bytes
) currentpagedir
, ARCH_PAGEDIR_SIZE
, 0) != OK
)
1272 panic("VM: sys_vircopy failed");
1274 /* We have mapped in kernel ourselves; now copy mappings for VM
1275 * that kernel made, including allocations for BSS. Skip identity
1276 * mapping bits; just map in VM.
1278 for(p
= 0; p
< ARCH_VM_DIR_ENTRIES
; p
++) {
1279 u32_t entry
= currentpagedir
[p
];
1280 phys_bytes ptaddr_kern
, ptaddr_us
;
1282 /* BIGPAGEs are kernel mapping (do ourselves) or boot
1283 * identity mapping (don't want).
1285 if(!(entry
& ARCH_VM_PDE_PRESENT
)) continue;
1286 if((entry
& ARCH_VM_BIGPAGE
)) continue;
1288 if(pt_ptalloc(newpt
, p
, 0) != OK
)
1289 panic("pt_ptalloc failed");
1290 assert(newpt
->pt_dir
[p
] & ARCH_VM_PDE_PRESENT
);
1292 #if defined(__i386__)
1293 ptaddr_kern
= entry
& ARCH_VM_ADDR_MASK
;
1294 ptaddr_us
= newpt
->pt_dir
[p
] & ARCH_VM_ADDR_MASK
;
1295 #elif defined(__arm__)
1296 ptaddr_kern
= entry
& ARCH_VM_PDE_MASK
;
1297 ptaddr_us
= newpt
->pt_dir
[p
] & ARCH_VM_PDE_MASK
;
1300 /* Copy kernel-initialized pagetable contents into our
1301 * normally accessible pagetable.
1303 if(sys_abscopy(ptaddr_kern
, ptaddr_us
, VM_PAGE_SIZE
) != OK
)
1304 panic("pt_init: abscopy failed");
1307 /* Inform kernel vm has a newly built page table. */
1308 assert(vmproc
[VM_PROC_NR
].vm_endpoint
== VM_PROC_NR
);
1309 pt_bind(newpt
, &vmproc
[VM_PROC_NR
]);
1313 /* VM is now fully functional in that it can dynamically allocate memory
1316 * We don't want to keep using the bootstrap statically allocated spare
1317 * pages though, as the physical addresses will change on liveupdate. So we
1318 * re-do part of the initialization now with purely dynamically allocated
1319 * memory. First throw out the static pool.
1321 * Then allocate the kernel-shared-pagetables and VM pagetables with dynamic
1325 alloc_cycle(); /* Make sure allocating works */
1326 while(vm_getsparepage(&phys
)) ; /* Use up all static pages */
1327 alloc_cycle(); /* Refill spares with dynamic */
1328 pt_allocate_kernel_mapped_pagetables(); /* Reallocate in-kernel pages */
1329 pt_bind(newpt
, &vmproc
[VM_PROC_NR
]); /* Recalculate */
1330 pt_mapkernel(newpt
); /* Rewrite pagetable info */
1332 /* Flush TLB just in case any of those mappings have been touched */
1333 if((sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
1334 panic("VMCTL_FLUSHTLB failed");
1337 /* Recreate VM page table with dynamic-only allocations */
1338 memset(&newpt_dyn
, 0, sizeof(newpt_dyn
));
1340 pt_copy(&newpt_dyn
, newpt
);
1341 memcpy(newpt
, &newpt_dyn
, sizeof(*newpt
));
1343 pt_bind(newpt
, &vmproc
[VM_PROC_NR
]); /* Recalculate */
1344 pt_mapkernel(newpt
); /* Rewrite pagetable info */
1346 /* Flush TLB just in case any of those mappings have been touched */
1347 if((sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
1348 panic("VMCTL_FLUSHTLB failed");
1355 /*===========================================================================*
1357 *===========================================================================*/
1358 int pt_bind(pt_t
*pt
, struct vmproc
*who
)
1360 int procslot
, pdeslot
;
1365 int pages_per_pagedir
= ARCH_PAGEDIR_SIZE
/VM_PAGE_SIZE
;
1368 slots_per_pde
= ARCH_VM_PT_ENTRIES
/ pages_per_pagedir
;
1370 /* Basic sanity checks. */
1372 assert(who
->vm_flags
& VMF_INUSE
);
1375 procslot
= who
->vm_slot
;
1376 pdm
= &pagedir_mappings
[procslot
/slots_per_pde
];
1377 pdeslot
= procslot
%slots_per_pde
;
1378 pagedir_pde
= pdm
->pdeno
;
1379 assert(pdeslot
>= 0);
1380 assert(procslot
< ELEMENTS(vmproc
));
1381 assert(pdeslot
< ARCH_VM_PT_ENTRIES
/ pages_per_pagedir
);
1382 assert(pagedir_pde
>= 0);
1384 #if defined(__i386__)
1385 phys
= pt
->pt_dir_phys
& ARCH_VM_ADDR_MASK
;
1386 #elif defined(__arm__)
1387 phys
= pt
->pt_dir_phys
& ARM_VM_PTE_MASK
;
1389 assert(pt
->pt_dir_phys
== phys
);
1390 assert(!(pt
->pt_dir_phys
% ARCH_PAGEDIR_SIZE
));
1392 /* Update "page directory pagetable." */
1393 #if defined(__i386__)
1394 pdm
->page_directories
[pdeslot
] =
1395 phys
| ARCH_VM_PDE_PRESENT
|ARCH_VM_PTE_RW
;
1396 #elif defined(__arm__)
1399 for (i
= 0; i
< pages_per_pagedir
; i
++) {
1400 pdm
->page_directories
[pdeslot
*pages_per_pagedir
+i
] =
1401 (phys
+i
*VM_PAGE_SIZE
)
1402 | ARCH_VM_PTE_PRESENT
1405 | ARCH_VM_PTE_USER
; //LSC FIXME
1410 /* This is where the PDE's will be visible to the kernel
1411 * in its address space.
1413 pdes
= (void *) (pagedir_pde
*ARCH_BIG_PAGE_SIZE
+
1414 #if defined(__i386__)
1415 pdeslot
* VM_PAGE_SIZE
);
1416 #elif defined(__arm__)
1417 pdeslot
* ARCH_PAGEDIR_SIZE
);
1420 /* Tell kernel about new page table root. */
1421 return sys_vmctl_set_addrspace(who
->vm_endpoint
, pt
->pt_dir_phys
, pdes
);
1424 /*===========================================================================*
1426 *===========================================================================*/
1427 void pt_free(pt_t
*pt
)
1429 /* Free memory associated with this pagetable. */
1432 for(i
= 0; i
< ARCH_VM_DIR_ENTRIES
; i
++)
1434 vm_freepages((vir_bytes
) pt
->pt_pt
[i
], 1);
1439 /*===========================================================================*
1441 *===========================================================================*/
1442 int pt_mapkernel(pt_t
*pt
)
1445 int kern_pde
= kern_start_pde
;
1446 phys_bytes addr
, mapped
= 0;
1448 /* Any page table needs to map in the kernel address space. */
1450 assert(kern_pde
>= 0);
1452 /* pt_init() has made sure this is ok. */
1453 addr
= kern_mb_mod
->mod_start
;
1455 /* Actually mapping in kernel */
1456 while(mapped
< kern_size
) {
1457 #if defined(__i386__)
1458 pt
->pt_dir
[kern_pde
] = addr
| ARCH_VM_PDE_PRESENT
|
1459 ARCH_VM_BIGPAGE
| ARCH_VM_PTE_RW
| global_bit
;
1460 #elif defined(__arm__)
1461 pt
->pt_dir
[kern_pde
] = (addr
& ARM_VM_SECTION_MASK
)
1463 | ARM_VM_SECTION_DOMAIN
1464 | ARM_VM_SECTION_CACHED
1465 | ARM_VM_SECTION_SUPER
;
1468 mapped
+= ARCH_BIG_PAGE_SIZE
;
1469 addr
+= ARCH_BIG_PAGE_SIZE
;
1472 /* Kernel also wants to know about all page directories. */
1475 for(pd
= 0; pd
< MAX_PAGEDIR_PDES
; pd
++) {
1476 struct pdm
*pdm
= &pagedir_mappings
[pd
];
1478 assert(pdm
->pdeno
> 0);
1479 assert(pdm
->pdeno
> kern_pde
);
1480 pt
->pt_dir
[pdm
->pdeno
] = pdm
->val
;
1484 /* Kernel also wants various mappings of its own. */
1485 for(i
= 0; i
< kernmappings
; i
++) {
1487 if((r
=pt_writemap(NULL
, pt
,
1488 kern_mappings
[i
].vir_addr
,
1489 kern_mappings
[i
].phys_addr
,
1490 kern_mappings
[i
].len
,
1491 kern_mappings
[i
].flags
, 0)) != OK
) {
/* Accessor: number of pages VM has allocated for its own use, as tracked
 * by the file-scope counter vm_self_pages. */
int get_vm_self_pages(void) { return vm_self_pages; }