3 #define _POSIX_SOURCE 1
7 #include <minix/callnr.h>
9 #include <minix/config.h>
10 #include <minix/const.h>
12 #include <minix/endpoint.h>
13 #include <minix/keymap.h>
14 #include <minix/minlib.h>
15 #include <minix/type.h>
16 #include <minix/ipc.h>
17 #include <minix/sysutil.h>
18 #include <minix/syslib.h>
19 #include <minix/safecopies.h>
20 #include <minix/cpufeature.h>
21 #include <minix/bitmap.h>
36 #include "../sanitycheck.h"
/* NOTE(review): this chunk is a damaged extraction — the embedded original
 * line numbers jump, so interior lines are missing throughout (e.g. the
 * struct headers for sparepages[] and kern_mappings[] below). All code is
 * left byte-identical; only comments have been added.
 */
40 /* PDE used to map in kernel, kernel physical address. */
41 PRIVATE
int id_map_high_pde
= -1, pagedir_pde
= -1;
42 PRIVATE u32_t global_bit
= 0, pagedir_pde_val
;
/* First page-directory entry available to regular processes. */
44 PRIVATE
int proc_pde
= 0;
46 /* 4MB page size available in hardware? */
47 PRIVATE
int bigpage_ok
= 0;
49 /* Our process table entry. */
50 struct vmproc
*vmp
= &vmproc
[VM_PROC_NR
];
52 /* Spare memory, ready to go after initialization, to avoid a
53 * circular dependency on allocating memory and writing it into VM's
/* NOTE(review): the struct declaration that sparepages[] belongs to is not
 * visible here — only its closing brace survives the extraction. */
57 int missing_spares
= SPAREPAGES
;
61 } sparepages
[SPAREPAGES
];
63 #define MAX_KERNMAPPINGS 10
/* NOTE(review): likewise, the struct header for kern_mappings[] is missing. */
65 phys_bytes phys_addr
; /* Physical addr. */
66 phys_bytes len
; /* Length in bytes. */
67 vir_bytes lin_addr
; /* Offset in page table. */
69 } kern_mappings
[MAX_KERNMAPPINGS
];
72 /* Clicks must be pages, as
73 * - they must be page aligned to map them
74 * - they must be a multiple of the page size
75 * - it's inconvenient to have them bigger than pages, because we often want
77 * May as well require them to be equal then.
79 #if CLICK_SIZE != I386_PAGE_SIZE
80 #error CLICK_SIZE must be page size.
83 /* Bytes of virtual address space one pde controls. */
84 #define BYTESPERPDE (I386_VM_PT_ENTRIES * I386_PAGE_SIZE)
86 /* Nevertheless, introduce these macros to make the code readable. */
87 #define CLICK2PAGE(c) ((c) / CLICKSPERPAGE)
89 /* Page table that contains pointers to all page directories. */
90 u32_t page_directories_phys
, *page_directories
= NULL
;
93 /*===========================================================================*
 *				pt_sanitycheck		     		     *
95 *===========================================================================*/
96 PUBLIC
void pt_sanitycheck(pt_t
*pt
, char *file
, int line
)
98 /* Basic pt sanity check. */
/* NOTE(review): extraction gaps — the function's opening brace and local
 * declarations (at least 'i' and 'slot') are missing from this view. */
103 MYASSERT(pt
->pt_dir
);
104 MYASSERT(pt
->pt_dir_phys
);
/* Locate which vmproc slot owns this page table. */
106 for(slot
= 0; slot
< ELEMENTS(vmproc
); slot
++) {
107 if(pt
== &vmproc
[slot
].vm_pt
)
111 if(slot
>= ELEMENTS(vmproc
)) {
112 vm_panic("pt_sanitycheck: passed pt not in any proc", NO_NUM
);
/* Account the page directory page itself in the used-pages bookkeeping. */
115 MYASSERT(usedpages_add(pt
->pt_dir_phys
, I386_PAGE_SIZE
) == OK
);
/* Walk all process PDEs: a non-NULL pt_pt[i] must have a present pt_dir[i],
 * and vice versa. */
117 for(i
= proc_pde
; i
< I386_VM_DIR_ENTRIES
; i
++) {
119 if(!(pt
->pt_dir
[i
] & I386_VM_PRESENT
)) {
120 printf("slot %d: pt->pt_pt[%d] = 0x%lx, but pt_dir entry 0x%lx\n",
121 slot
, i
, pt
->pt_pt
[i
], pt
->pt_dir
[i
]);
123 MYASSERT(pt
->pt_dir
[i
] & I386_VM_PRESENT
);
124 MYASSERT(usedpages_add(I386_VM_PFA(pt
->pt_dir
[i
]),
125 I386_PAGE_SIZE
) == OK
);
127 MYASSERT(!(pt
->pt_dir
[i
] & I386_VM_PRESENT
));
133 /*===========================================================================*
 *				aalloc					     *
135 *===========================================================================*/
136 PRIVATE
void *aalloc(size_t bytes
)
138 /* Page-aligned malloc(). only used if vm_allocpage can't be used. */
/* Over-allocate by one page, then round the pointer up to the next page
 * boundary. NOTE(review): the declaration of 'b' and the return statement
 * are among the lines missing from this extraction. */
141 b
= (u32_t
) malloc(I386_PAGE_SIZE
+ bytes
);
142 if(!b
) vm_panic("aalloc: out of memory", bytes
);
143 b
+= I386_PAGE_SIZE
- (b
% I386_PAGE_SIZE
);
148 /*===========================================================================*
 *				findhole				     *
150 *===========================================================================*/
151 PRIVATE u32_t
findhole(pt_t
*pt
, u32_t vmin
, u32_t vmax
)
153 /* Find a space in the virtual address space of pagetable 'pt',
154 * between page-aligned BYTE offsets vmin and vmax, to fit
155 * a page in. Return byte offset.
157 u32_t freefound
= 0, curv
;
158 int pde
= 0, try_restart
;
159 static u32_t lastv
= 0;
161 /* Input sanity check. */
162 vm_assert(vmin
+ I386_PAGE_SIZE
>= vmin
);
163 vm_assert(vmax
>= vmin
+ I386_PAGE_SIZE
);
164 vm_assert((vmin
% I386_PAGE_SIZE
) == 0);
165 vm_assert((vmax
% I386_PAGE_SIZE
) == 0);
/* Pick a random page-aligned starting point within [vmin, vmax). */
168 curv
= ((u32_t
) random()) % ((vmax
- vmin
)/I386_PAGE_SIZE
);
169 curv
*= I386_PAGE_SIZE
;
173 if(curv
< vmin
|| curv
>= vmax
)
178 /* Start looking for a free page starting at vmin. */
/* NOTE(review): the loop header for the scan below is missing from this
 * extraction; what follows is its body. */
182 vm_assert(curv
>= vmin
);
183 vm_assert(curv
< vmax
);
185 pde
= I386_VM_PDE(curv
);
186 pte
= I386_VM_PTE(curv
);
/* A hole is a slot whose page table or PTE is not present. */
188 if(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
) ||
189 !(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
)) {
194 curv
+=I386_PAGE_SIZE
;
/* Wrap around once to vmin before giving up. */
196 if(curv
>= vmax
&& try_restart
) {
202 printf("VM: out of virtual address space in vm\n");
207 /*===========================================================================*
 *				vm_freepages				     *
209 *===========================================================================*/
210 PRIVATE
void vm_freepages(vir_bytes vir
, vir_bytes phys
, int pages
, int reason
)
/* Free pages previously handed out by vm_allocpage(); only pages above the
 * stack top are actually returned and unmapped — heap pages are not freed
 * (see the printf in the else-path below). */
212 vm_assert(reason
>= 0 && reason
< VMP_CATEGORIES
);
213 if(vir
>= vmp
->vm_stacktop
) {
214 vm_assert(!(vir
% I386_PAGE_SIZE
));
215 vm_assert(!(phys
% I386_PAGE_SIZE
));
216 FREE_MEM(ABS2CLICK(phys
), pages
);
/* Clear the mapping in VM's own page table. */
217 if(pt_writemap(&vmp
->vm_pt
, arch_vir2map(vmp
, vir
),
218 MAP_NONE
, pages
*I386_PAGE_SIZE
, 0, WMF_OVERWRITE
) != OK
)
219 vm_panic("vm_freepages: pt_writemap failed",
222 printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
227 /*===========================================================================*
 *				vm_getsparepage				     *
229 *===========================================================================*/
230 PRIVATE
void *vm_getsparepage(u32_t
*phys
)
/* Hand out one of the pre-allocated spare pages, storing its physical
 * address in *phys. Used when normal allocation is impossible (e.g. during
 * recursion or before memory initialization — see vm_allocpage). */
233 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
234 for(s
= 0; s
< SPAREPAGES
; s
++) {
235 if(sparepages
[s
].page
) {
237 sp
= sparepages
[s
].page
;
238 *phys
= sparepages
[s
].phys
;
/* Mark the slot empty; vm_checkspares() replenishes it later. */
239 sparepages
[s
].page
= NULL
;
241 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
248 /*===========================================================================*
 *				vm_checkspares				     *
250 *===========================================================================*/
251 PRIVATE
void *vm_checkspares(void)
/* Refill empty sparepages[] slots via vm_allocpage(), tracking how many
 * refills were needed in 'total'/'worst' statistics. */
254 static int total
= 0, worst
= 0;
255 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
256 for(s
= 0; s
< SPAREPAGES
&& missing_spares
> 0; s
++)
257 if(!sparepages
[s
].page
) {
259 if((sparepages
[s
].page
= vm_allocpage(&sparepages
[s
].phys
,
262 vm_assert(missing_spares
>= 0);
263 vm_assert(missing_spares
<= SPAREPAGES
);
265 printf("VM: warning: couldn't get new spare page\n");
268 if(worst
< n
) worst
= n
;
274 /*===========================================================================*
 *				vm_allocpage		     		     *
276 *===========================================================================*/
277 PUBLIC
void *vm_allocpage(phys_bytes
*phys
, int reason
)
279 /* Allocate a page for use by VM itself. */
/* 'level' guards against unbounded recursion: vm_allocpage may be re-entered
 * once via pt_writemap -> pt_ptalloc; at level > 1 it falls back to the
 * spare-page pool instead of recursing further. */
284 static int level
= 0;
288 vm_assert(reason
>= 0 && reason
< VMP_CATEGORIES
);
292 vm_assert(level
>= 1);
293 vm_assert(level
<= 2);
/* No private page table yet, or re-entered: hand out a spare page. */
295 if(level
> 1 || !(vmp
->vm_flags
& VMF_HASPT
) || !meminit_done
) {
298 s
=vm_getsparepage(phys
);
302 printf("VM: warning: out of spare pages\n");
307 /* VM does have a pagetable, so get a page and map it in there.
308 * Where in our virtual address space can we put it?
310 loc
= findhole(pt
, arch_vir2map(vmp
, vmp
->vm_stacktop
),
311 vmp
->vm_arch
.vm_data_top
);
314 printf("VM: vm_allocpage: findhole failed\n");
318 /* Allocate page of memory for use by VM. As VM
319 * is trusted, we don't have to pre-clear it.
321 if((newpage
= ALLOC_MEM(CLICKSPERPAGE
, 0)) == NO_MEM
) {
323 printf("VM: vm_allocpage: ALLOC_MEM failed\n");
327 *phys
= CLICK2ABS(newpage
);
329 /* Map this page into our address space. */
330 if((r
=pt_writemap(pt
, loc
, *phys
, I386_PAGE_SIZE
,
331 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
, 0)) != OK
) {
332 FREE_MEM(newpage
, CLICKSPERPAGE
);
/* NOTE(review): this printf passes 'ret' but the format string has no
 * conversion specifier — the argument is silently ignored; likely meant
 * "vm_allocpage writemap failed: %d\n". */
333 printf("vm_allocpage writemap failed\n", ret
);
338 if((r
=sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
339 vm_panic("VMCTL_FLUSHTLB failed", r
);
344 /* Return user-space-ready pointer to it. */
345 ret
= (void *) arch_map2vir(vmp
, loc
);
350 /*===========================================================================*
 *				vm_pagelock		     		     *
352 *===========================================================================*/
353 PUBLIC
void vm_pagelock(void *vir
, int lockflag
)
355 /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
/* When lockflag is clear the WRITE bit is added back below, making the page
 * writable again; only the permission flags are touched (WMF_WRITEFLAGSONLY). */
358 u32_t flags
= I386_VM_PRESENT
| I386_VM_USER
;
362 m
= arch_vir2map(vmp
, (vir_bytes
) vir
);
364 vm_assert(!(m
% I386_PAGE_SIZE
));
367 flags
|= I386_VM_WRITE
;
370 if((r
=pt_writemap(pt
, m
, 0, I386_PAGE_SIZE
,
371 flags
, WMF_OVERWRITE
| WMF_WRITEFLAGSONLY
)) != OK
) {
/* NOTE(review): panic message says "vm_lockpage" but the function is
 * vm_pagelock — misleading when grepping logs. */
372 vm_panic("vm_lockpage: pt_writemap failed\n", NO_NUM
);
375 if((r
=sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
376 vm_panic("VMCTL_FLUSHTLB failed", r
);
382 /*===========================================================================*
 *				pt_ptalloc		     		     *
384 *===========================================================================*/
385 PRIVATE
int pt_ptalloc(pt_t
*pt
, int pde
, u32_t flags
)
387 /* Allocate a page table and write its address into the page directory. */
391 /* Argument must make sense. */
392 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
393 vm_assert(!(flags
& ~(PTF_ALLFLAGS
)));
395 /* We don't expect to overwrite page directory entry, nor
396 * storage for the page table.
398 vm_assert(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
));
399 vm_assert(!pt
->pt_pt
[pde
]);
401 /* Get storage for the page table. */
402 if(!(pt
->pt_pt
[pde
] = vm_allocpage(&pt_phys
, VMP_PAGETABLE
)))
405 for(i
= 0; i
< I386_VM_PT_ENTRIES
; i
++)
406 pt
->pt_pt
[pde
][i
] = 0; /* Empty entry. */
408 /* Make page directory entry.
409 * The PDE is always 'present,' 'writable,' and 'user accessible,'
410 * relying on the PTE for protection.
412 pt
->pt_dir
[pde
] = (pt_phys
& I386_VM_ADDR_MASK
) | flags
413 | I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
;
418 /*===========================================================================*
 *				pt_writemap		     		     *
420 *===========================================================================*/
421 PUBLIC
int pt_writemap(pt_t
*pt
, vir_bytes v
, phys_bytes physaddr
,
422 size_t bytes
, u32_t flags
, u32_t writemapflags
)
424 /* Write mapping into page table. Allocate a new page table if necessary. */
425 /* Page directory and table entries for this virtual address. */
426 int p
, pages
, pdecheck
;
/* WMF_VERIFY: check existing entries instead of writing them. */
430 if(writemapflags
& WMF_VERIFY
)
433 vm_assert(!(bytes
% I386_PAGE_SIZE
));
434 vm_assert(!(flags
& ~(PTF_ALLFLAGS
)));
436 pages
= bytes
/ I386_PAGE_SIZE
;
438 /* MAP_NONE means to clear the mapping. It doesn't matter
439 * what's actually written into the PTE if I386_VM_PRESENT
440 * isn't on, so we can just write MAP_NONE into it.
443 if(physaddr
!= MAP_NONE
&& !(flags
& I386_VM_PRESENT
)) {
444 vm_panic("pt_writemap: writing dir with !P\n", NO_NUM
);
446 if(physaddr
== MAP_NONE
&& flags
) {
447 vm_panic("pt_writemap: writing 0 with flags\n", NO_NUM
);
451 finalpde
= I386_VM_PDE(v
+ I386_PAGE_SIZE
* pages
);
453 /* First make sure all the necessary page tables are allocated,
454 * before we start writing in any of them, because it's a pain
455 * to undo our work properly. Walk the range in page-directory-entry
458 for(pdecheck
= I386_VM_PDE(v
); pdecheck
<= finalpde
; pdecheck
++) {
459 vm_assert(pdecheck
>= 0 && pdecheck
< I386_VM_DIR_ENTRIES
);
460 if(pt
->pt_dir
[pdecheck
] & I386_VM_BIGPAGE
) {
461 printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
463 vm_panic("pt_writemap: BIGPAGE found", NO_NUM
);
465 if(!(pt
->pt_dir
[pdecheck
] & I386_VM_PRESENT
)) {
468 printf("pt_writemap verify: no pde %d\n", pdecheck
);
471 vm_assert(!pt
->pt_dir
[pdecheck
]);
472 if((r
=pt_ptalloc(pt
, pdecheck
, flags
)) != OK
) {
473 /* Couldn't do (complete) mapping.
474 * Don't bother freeing any previously
475 * allocated page tables, they're
476 * still writable, don't point to nonsense,
477 * and pt_ptalloc leaves the directory
478 * and other data in a consistent state.
480 printf("pt_writemap: pt_ptalloc failed\n", pdecheck
);
484 vm_assert(pt
->pt_dir
[pdecheck
] & I386_VM_PRESENT
);
487 /* Now write in them. */
488 for(p
= 0; p
< pages
; p
++) {
490 int pde
= I386_VM_PDE(v
);
491 int pte
= I386_VM_PTE(v
);
493 vm_assert(!(v
% I386_PAGE_SIZE
));
494 vm_assert(pte
>= 0 && pte
< I386_VM_PT_ENTRIES
);
495 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
497 /* Page table has to be there. */
498 vm_assert(pt
->pt_dir
[pde
] & I386_VM_PRESENT
);
500 /* Make sure page directory entry for this page table
501 * is marked present and page table entry is available.
503 vm_assert((pt
->pt_dir
[pde
] & I386_VM_PRESENT
) && pt
->pt_pt
[pde
]);
506 /* We don't expect to overwrite a page. */
507 if(!(writemapflags
& (WMF_OVERWRITE
|WMF_VERIFY
)))
508 vm_assert(!(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
));
/* For flags-only writes (and frees), keep the existing frame address. */
510 if(writemapflags
& (WMF_WRITEFLAGSONLY
|WMF_FREE
)) {
511 physaddr
= pt
->pt_pt
[pde
][pte
] & I386_VM_ADDR_MASK
;
514 if(writemapflags
& WMF_FREE
) {
515 FREE_MEM(ABS2CLICK(physaddr
), 1);
518 /* Entry we will write. */
519 entry
= (physaddr
& I386_VM_ADDR_MASK
) | flags
;
/* Verify path: compare ignoring the hardware-set accessed/dirty bits. */
523 maskedentry
= pt
->pt_pt
[pde
][pte
];
524 maskedentry
&= ~(I386_VM_ACC
|I386_VM_DIRTY
);
525 /* Verify pagetable entry. */
526 if(maskedentry
!= entry
) {
527 printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
528 pt
->pt_pt
[pde
][pte
], maskedentry
, entry
);
532 /* Write pagetable entry. */
533 pt
->pt_pt
[pde
][pte
] = entry
;
536 physaddr
+= I386_PAGE_SIZE
;
543 /*===========================================================================*
 *				pt_checkrange		     		     *
545 *===========================================================================*/
546 PUBLIC
int pt_checkrange(pt_t
*pt
, vir_bytes v
, size_t bytes
,
/* Check that the page range [v, v+bytes) is fully mapped in 'pt' (and
 * writable, when the — not visible here — 'write' parameter is set).
 * NOTE(review): the parameter list is truncated by the extraction. */
551 vm_assert(!(bytes
% I386_PAGE_SIZE
));
553 pages
= bytes
/ I386_PAGE_SIZE
;
555 for(p
= 0; p
< pages
; p
++) {
557 int pde
= I386_VM_PDE(v
);
558 int pte
= I386_VM_PTE(v
);
560 vm_assert(!(v
% I386_PAGE_SIZE
));
561 vm_assert(pte
>= 0 && pte
< I386_VM_PT_ENTRIES
);
562 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
564 /* Page table has to be there. */
565 if(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
))
568 /* Make sure page directory entry for this page table
569 * is marked present and page table entry is available.
571 vm_assert((pt
->pt_dir
[pde
] & I386_VM_PRESENT
) && pt
->pt_pt
[pde
]);
573 if(!(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
)) {
577 if(write
&& !(pt
->pt_pt
[pde
][pte
] & I386_VM_WRITE
)) {
587 /*===========================================================================*
 *				pt_new			     		     *
589 *===========================================================================*/
590 PUBLIC
int pt_new(pt_t
*pt
)
592 /* Allocate a pagetable root. On i386, allocate a page-aligned page directory
593 * and set them to 0 (indicating no page tables are allocated). Lookup
594 * its physical address as we'll need that in the future. Verify it's
599 /* Don't ever re-allocate/re-move a certain process slot's
600 * page directory once it's been created. This is a fraction
601 * faster, but also avoids having to invalidate the page
602 * mappings from in-kernel page tables pointing to
603 * the page directories (the page_directories data).
606 !(pt
->pt_dir
= vm_allocpage(&pt
->pt_dir_phys
, VMP_PAGEDIR
))) {
610 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++) {
611 pt
->pt_dir
[i
] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
615 /* Where to start looking for free virtual address space? */
/* Every new page table gets the kernel mapped in. */
619 if(pt_mapkernel(pt
) != OK
)
620 vm_panic("pt_new: pt_mapkernel failed", NO_NUM
);
625 /*===========================================================================*
 *				pt_identity		     		     *
627 *===========================================================================*/
628 PUBLIC
int pt_identity(pt_t
*pt
)
630 /* Allocate a pagetable that does a 1:1 mapping. */
633 /* Allocate page directory. */
635 !(pt
->pt_dir
= vm_allocpage(&pt
->pt_dir_phys
, VMP_PAGEDIR
))) {
/* Fill every PDE with a 4MB big page mapping its own address range. */
639 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++) {
641 addr
= I386_BIG_PAGE_SIZE
*i
;
642 pt
->pt_dir
[i
] = (addr
& I386_VM_ADDR_MASK_4MB
) |
645 I386_VM_PRESENT
|I386_VM_WRITE
;
649 /* Where to start looking for free virtual address space? */
655 /*===========================================================================*
 *				pt_init			     		     *
657 *===========================================================================*/
658 PUBLIC
void pt_init(phys_bytes usedlimit
)
660 /* By default, the kernel gives us a data segment with pre-allocated
661 * memory that then can't grow. We want to be able to allocate memory
662 * dynamically, however. So here we copy the part of the page table
663 * that's ours, so we get a private page table. Then we increase the
664 * hardware segment size so we can allocate memory above our stack.
/* NOTE(review): several local declarations (r, s, lo, hi, moveup, newpt,
 * free_pde, kernlimit, index, flags, vir, ...) are missing from this
 * extraction. */
668 vir_bytes v
, kpagedir
;
670 vir_bytes extra_clicks
;
672 int global_bit_ok
= 0;
676 vir_bytes sparepages_mem
;
677 phys_bytes sparepages_ph
;
683 /* Get ourselves spare pages. */
684 if(!(sparepages_mem
= (vir_bytes
) aalloc(I386_PAGE_SIZE
*SPAREPAGES
)))
685 vm_panic("pt_init: aalloc for spare failed", NO_NUM
);
686 if((r
=sys_umap(SELF
, VM_D
, (vir_bytes
) sparepages_mem
,
687 I386_PAGE_SIZE
*SPAREPAGES
, &sparepages_ph
)) != OK
)
688 vm_panic("pt_init: sys_umap failed", r
);
690 for(s
= 0; s
< SPAREPAGES
; s
++) {
691 sparepages
[s
].page
= (void *) (sparepages_mem
+ s
*I386_PAGE_SIZE
);
692 sparepages
[s
].phys
= sparepages_ph
+ s
*I386_PAGE_SIZE
;
697 /* global bit and 4MB pages available? */
698 global_bit_ok
= _cpufeature(_CPUF_I386_PGE
);
699 bigpage_ok
= _cpufeature(_CPUF_I386_PSE
);
701 /* Set bit for PTE's and PDE's if available. */
703 global_bit
= I386_VM_GLOBAL
;
705 /* The kernel and boot time processes need an identity mapping.
706 * We use full PDE's for this without separate page tables.
707 * Figure out which pde we can start using for other purposes.
709 id_map_high_pde
= usedlimit
/ I386_BIG_PAGE_SIZE
;
711 /* We have to make mappings up till here. */
712 free_pde
= id_map_high_pde
+1;
714 printf("map high pde: %d for limit: 0x%lx\n",
715 id_map_high_pde
, usedlimit
);
717 /* Initial (current) range of our virtual address space. */
718 lo
= CLICK2ABS(vmp
->vm_arch
.vm_seg
[T
].mem_phys
);
719 hi
= CLICK2ABS(vmp
->vm_arch
.vm_seg
[S
].mem_phys
+
720 vmp
->vm_arch
.vm_seg
[S
].mem_len
);
722 vm_assert(!(lo
% I386_PAGE_SIZE
));
723 vm_assert(!(hi
% I386_PAGE_SIZE
));
/* If VM currently sits below VM_PROCSTART, plan to shift it up. */
725 if(lo
< VM_PROCSTART
) {
726 moveup
= VM_PROCSTART
- lo
;
727 vm_assert(!(VM_PROCSTART
% I386_PAGE_SIZE
));
728 vm_assert(!(lo
% I386_PAGE_SIZE
));
729 vm_assert(!(moveup
% I386_PAGE_SIZE
));
732 /* Make new page table for ourselves, partly copied
733 * from the current one.
735 if(pt_new(newpt
) != OK
)
736 vm_panic("pt_init: pt_new failed", NO_NUM
);
738 /* Set up mappings for VM process. */
739 for(v
= lo
; v
< hi
; v
+= I386_PAGE_SIZE
) {
743 /* We have to write the new position in the PT,
744 * so we can move our segments.
746 if(pt_writemap(newpt
, v
+moveup
, v
, I386_PAGE_SIZE
,
747 I386_VM_PRESENT
|I386_VM_WRITE
|I386_VM_USER
, 0) != OK
)
748 vm_panic("pt_init: pt_writemap failed", NO_NUM
);
751 /* Move segments up too. */
752 vmp
->vm_arch
.vm_seg
[T
].mem_phys
+= ABS2CLICK(moveup
);
753 vmp
->vm_arch
.vm_seg
[D
].mem_phys
+= ABS2CLICK(moveup
);
754 vmp
->vm_arch
.vm_seg
[S
].mem_phys
+= ABS2CLICK(moveup
);
756 /* Allocate us a page table in which to remember page directory
759 if(!(page_directories
= vm_allocpage(&page_directories_phys
,
761 vm_panic("no virt addr for vm mappings", NO_NUM
);
763 memset(page_directories
, 0, I386_PAGE_SIZE
);
765 /* Increase our hardware data segment to create virtual address
766 * space above our stack. We want to increase it to VM_DATATOP,
767 * like regular processes have.
769 extra_clicks
= ABS2CLICK(VM_DATATOP
- hi
);
770 vmp
->vm_arch
.vm_seg
[S
].mem_len
+= extra_clicks
;
772 /* We pretend to the kernel we have a huge stack segment to
773 * increase our data segment.
775 vmp
->vm_arch
.vm_data_top
=
776 (vmp
->vm_arch
.vm_seg
[S
].mem_vir
+
777 vmp
->vm_arch
.vm_seg
[S
].mem_len
) << CLICK_SHIFT
;
779 /* Where our free virtual address space starts.
780 * This is only a hint to the VM system.
782 newpt
->pt_virtop
= 0;
784 /* Let other functions know VM now has a private page table. */
785 vmp
->vm_flags
|= VMF_HASPT
;
787 /* Now reserve another pde for kernel's own mappings. */
790 phys_bytes addr
, len
;
794 kernmap_pde
= free_pde
++;
795 offset
= kernmap_pde
* I386_BIG_PAGE_SIZE
;
797 while(sys_vmctl_get_mapping(index
, &addr
, &len
,
800 if(index
>= MAX_KERNMAPPINGS
)
801 vm_panic("VM: too many kernel mappings", index
);
802 kern_mappings
[index
].phys_addr
= addr
;
803 kern_mappings
[index
].len
= len
;
/* NOTE(review): this .flags assignment is unconditionally overwritten a few
 * lines below (original line 806) — the kernel-reported 'flags' value is
 * only consulted for the VMMF_UNCACHED test afterwards. */
804 kern_mappings
[index
].flags
= flags
;
805 kern_mappings
[index
].lin_addr
= offset
;
806 kern_mappings
[index
].flags
=
807 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
|
809 if(flags
& VMMF_UNCACHED
)
810 kern_mappings
[index
].flags
|=
811 I386_VM_PWT
| I386_VM_PCD
;
812 if(addr
% I386_PAGE_SIZE
)
813 vm_panic("VM: addr unaligned", addr
);
814 if(len
% I386_PAGE_SIZE
)
815 vm_panic("VM: len unaligned", len
);
816 vir
= arch_map2vir(&vmproc
[VMP_SYSTEM
], offset
);
817 if(sys_vmctl_reply_mapping(index
, vir
) != OK
)
818 vm_panic("VM: reply failed", NO_NUM
);
825 /* Find a PDE below processes available for mapping in the
826 * page directories (readonly).
828 pagedir_pde
= free_pde
++;
829 pagedir_pde_val
= (page_directories_phys
& I386_VM_ADDR_MASK
) |
830 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
;
832 /* Tell kernel about free pde's. */
833 while(free_pde
*I386_BIG_PAGE_SIZE
< VM_PROCSTART
) {
834 if((r
=sys_vmctl(SELF
, VMCTL_I386_FREEPDE
, free_pde
++)) != OK
) {
835 vm_panic("VMCTL_I386_FREEPDE failed", r
);
839 /* first pde in use by process. */
842 kernlimit
= free_pde
*I386_BIG_PAGE_SIZE
;
844 /* Increase kernel segment to address this memory. */
845 if((r
=sys_vmctl(SELF
, VMCTL_I386_KERNELLIMIT
, kernlimit
)) != OK
) {
846 vm_panic("VMCTL_I386_KERNELLIMIT failed", r
);
849 kpagedir
= arch_map2vir(&vmproc
[VMP_SYSTEM
],
850 pagedir_pde
*I386_BIG_PAGE_SIZE
);
852 /* Tell kernel how to get at the page directories. */
853 if((r
=sys_vmctl(SELF
, VMCTL_I386_PAGEDIRS
, kpagedir
)) != OK
) {
/* NOTE(review): copy-paste defect — this call is VMCTL_I386_PAGEDIRS but
 * the panic message says "VMCTL_I386_KERNELLIMIT failed". */
854 vm_panic("VMCTL_I386_KERNELLIMIT failed", r
);
857 /* Give our process the new, copied, private page table. */
858 pt_mapkernel(newpt
); /* didn't know about vm_dir pages earlier */
861 /* Now actually enable paging. */
862 if(sys_vmctl_enable_paging(vmp
->vm_arch
.vm_seg
) != OK
)
863 vm_panic("pt_init: enable paging failed", NO_NUM
);
865 /* Back to reality - this is where the stack actually is. */
866 vmp
->vm_arch
.vm_seg
[S
].mem_len
-= extra_clicks
;
873 /*===========================================================================*
 *				pt_bind			     		     *
875 *===========================================================================*/
876 PUBLIC
int pt_bind(pt_t
*pt
, struct vmproc
*who
)
881 /* Basic sanity checks. */
883 vm_assert(who
->vm_flags
& VMF_INUSE
);
887 vm_assert(slot
>= 0);
888 vm_assert(slot
< ELEMENTS(vmproc
));
/* One PTE per process slot in the page_directories page, so the number of
 * slots must fit in a single page table. */
889 vm_assert(slot
< I386_VM_PT_ENTRIES
);
891 phys
= pt
->pt_dir_phys
& I386_VM_ADDR_MASK
;
892 vm_assert(pt
->pt_dir_phys
== phys
);
894 /* Update "page directory pagetable." */
895 page_directories
[slot
] = phys
| I386_VM_PRESENT
|I386_VM_WRITE
;
898 printf("VM: slot %d has pde val 0x%lx\n", slot
, page_directories
[slot
]);
900 /* Tell kernel about new page table root. */
901 return sys_vmctl(who
->vm_endpoint
, VMCTL_I386_SETCR3
,
902 pt
? pt
->pt_dir_phys
: 0);
905 /*===========================================================================*
 *				pt_free			     		     *
907 *===========================================================================*/
908 PUBLIC
void pt_free(pt_t
*pt
)
910 /* Free memory associated with this pagetable. */
/* Releases each allocated page table page back via vm_freepages; the guard
 * that skips empty pt_pt[i] slots is among the lines missing here. */
913 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++)
915 vm_freepages((vir_bytes
) pt
->pt_pt
[i
],
916 I386_VM_PFA(pt
->pt_dir
[i
]), 1, VMP_PAGETABLE
);
921 /*===========================================================================*
 *				pt_mapkernel		     		     *
923 *===========================================================================*/
924 PUBLIC
int pt_mapkernel(pt_t
*pt
)
928 /* Any i386 page table needs to map in the kernel address space. */
929 vm_assert(vmproc
[VMP_SYSTEM
].vm_flags
& VMF_INUSE
);
/* Identity-map the low PDEs as 4MB big pages, up to id_map_high_pde
 * (established in pt_init from the kernel's used limit). */
933 for(pde
= 0; pde
<= id_map_high_pde
; pde
++) {
935 addr
= pde
* I386_BIG_PAGE_SIZE
;
936 vm_assert((addr
& I386_VM_ADDR_MASK
) == addr
);
937 pt
->pt_dir
[pde
] = addr
| I386_VM_PRESENT
|
938 I386_VM_BIGPAGE
| I386_VM_USER
|
939 I386_VM_WRITE
| global_bit
;
942 vm_panic("VM: pt_mapkernel: no bigpage", NO_NUM
);
945 if(pagedir_pde
>= 0) {
946 /* Kernel also wants to know about all page directories. */
947 pt
->pt_dir
[pagedir_pde
] = pagedir_pde_val
;
/* Replicate each registered kernel mapping into this page table. */
950 for(i
= 0; i
< kernmappings
; i
++) {
952 kern_mappings
[i
].lin_addr
,
953 kern_mappings
[i
].phys_addr
,
954 kern_mappings
[i
].len
,
955 kern_mappings
[i
].flags
, 0) != OK
) {
956 vm_panic("pt_mapkernel: pt_writemap failed", NO_NUM
);
963 /*===========================================================================*
965 *===========================================================================*/
966 PUBLIC
void pt_cycle(void)