3 #define _POSIX_SOURCE 1
5 #include <minix/callnr.h>
7 #include <minix/config.h>
8 #include <minix/const.h>
10 #include <minix/endpoint.h>
11 #include <minix/keymap.h>
12 #include <minix/minlib.h>
13 #include <minix/type.h>
14 #include <minix/ipc.h>
15 #include <minix/sysutil.h>
16 #include <minix/syslib.h>
17 #include <minix/safecopies.h>
18 #include <minix/cpufeature.h>
19 #include <minix/bitmap.h>
34 #include "../sanitycheck.h"
38 /* PDE used to map in kernel, kernel physical address. */
39 PRIVATE
int id_map_high_pde
= -1, pagedir_pde
= -1;
/* global_bit: PTE/PDE global flag, set in pt_init() when the CPU reports
 * PGE support; pagedir_pde_val: PDE value that maps the page-directory
 * pagetable, filled in by pt_init() and installed by pt_mapkernel().
 */
40 PRIVATE u32_t global_bit
= 0, pagedir_pde_val
;
/* First PDE available to processes (PDEs below are identity/kernel). */
42 PRIVATE
int proc_pde
= 0;
44 /* 4MB page size available in hardware? */
45 PRIVATE
int bigpage_ok
= 0;
47 /* Our process table entry. */
48 struct vmproc
*vmp
= &vmproc
[VM_PROC_NR
];
50 /* Spare memory, ready to go after initialization, to avoid a
51 * circular dependency on allocating memory and writing it into VM's
55 int missing_spares
= SPAREPAGES
;
59 } sparepages
[SPAREPAGES
];
61 #define MAX_KERNMAPPINGS 10
/* Table of kernel-requested mappings, filled by pt_init() from
 * sys_vmctl_get_mapping() and replayed into every page table by
 * pt_mapkernel().
 */
63 phys_bytes phys_addr
; /* Physical addr. */
64 phys_bytes len
; /* Length in bytes. */
65 vir_bytes lin_addr
; /* Offset in page table. */
67 } kern_mappings
[MAX_KERNMAPPINGS
];
70 /* Clicks must be pages, as
71 * - they must be page aligned to map them
72 * - they must be a multiple of the page size
73 * - it's inconvenient to have them bigger than pages, because we often want
75 * May as well require them to be equal then.
77 #if CLICK_SIZE != I386_PAGE_SIZE
78 #error CLICK_SIZE must be page size.
81 /* Bytes of virtual address space one pde controls. */
82 #define BYTESPERPDE (I386_VM_PT_ENTRIES * I386_PAGE_SIZE)
84 /* Nevertheless, introduce these macros to make the code readable. */
85 #define CLICK2PAGE(c) ((c) / CLICKSPERPAGE)
87 /* Page table that contains pointers to all page directories. */
88 u32_t page_directories_phys
, *page_directories
= NULL
;
91 /*===========================================================================*
93 *===========================================================================*/
/* Verify invariants of page table 'pt': the directory pointer and its
 * physical address are set, 'pt' belongs to some vmproc slot, and the
 * directory page plus every present page table page is registered via
 * usedpages_add().  'file' and 'line' identify the caller for MYASSERT
 * diagnostics.
 */
94 PUBLIC
void pt_sanitycheck(pt_t
*pt
, char *file
, int line
)
96 /* Basic pt sanity check. */
101 MYASSERT(pt
->pt_dir
);
102 MYASSERT(pt
->pt_dir_phys
);
/* Find which process slot owns this page table. */
104 for(slot
= 0; slot
< ELEMENTS(vmproc
); slot
++) {
105 if(pt
== &vmproc
[slot
].vm_pt
)
109 if(slot
>= ELEMENTS(vmproc
)) {
110 panic("pt_sanitycheck: passed pt not in any proc");
/* Account for the page directory page itself. */
113 MYASSERT(usedpages_add(pt
->pt_dir_phys
, I386_PAGE_SIZE
) == OK
);
/* Walk all process PDEs; each in-VM pt_pt pointer must agree with the
 * present bit of the corresponding directory entry.
 */
115 for(i
= proc_pde
; i
< I386_VM_DIR_ENTRIES
; i
++) {
117 if(!(pt
->pt_dir
[i
] & I386_VM_PRESENT
)) {
118 printf("slot %d: pt->pt_pt[%d] = 0x%lx, but pt_dir entry 0x%lx\n",
119 slot
, i
, pt
->pt_pt
[i
], pt
->pt_dir
[i
]);
121 MYASSERT(pt
->pt_dir
[i
] & I386_VM_PRESENT
);
/* Account for the page table page referenced by this PDE. */
122 MYASSERT(usedpages_add(I386_VM_PFA(pt
->pt_dir
[i
]),
123 I386_PAGE_SIZE
) == OK
);
125 MYASSERT(!(pt
->pt_dir
[i
] & I386_VM_PRESENT
));
131 /*===========================================================================*
133 *===========================================================================*/
/* Allocate 'bytes' of page-aligned memory with plain malloc(), used only
 * before vm_allocpage() is usable (see pt_init).  Over-allocates by one
 * page so the result can be rounded up to a page boundary; the original
 * malloc() pointer is discarded, so this memory is never freed (the
 * callers keep it for the lifetime of VM).  Panics on allocation failure.
 */
134 PRIVATE
void *aalloc(size_t bytes
)
136 /* Page-aligned malloc(). only used if vm_allocpage can't be used. */
139 b
= (u32_t
) malloc(I386_PAGE_SIZE
+ bytes
);
140 if(!b
) panic("aalloc: out of memory: %d", bytes
);
/* Round up to the next page boundary (advances a full page if b was
 * already aligned; harmless, since a full extra page was allocated).
 */
141 b
+= I386_PAGE_SIZE
- (b
% I386_PAGE_SIZE
);
146 /*===========================================================================*
148 *===========================================================================*/
/* Find a free (non-present) page-sized slot in the virtual address space
 * of 'pt' between page-aligned byte offsets vmin and vmax, starting the
 * scan at a random offset in the range and wrapping around once.
 * Returns the byte offset of the hole; prints an error when the whole
 * range is in use.
 */
149 PRIVATE u32_t
findhole(pt_t
*pt
, u32_t vmin
, u32_t vmax
)
151 /* Find a space in the virtual address space of pageteble 'pt',
152 * between page-aligned BYTE offsets vmin and vmax, to fit
153 * a page in. Return byte offset.
155 u32_t freefound
= 0, curv
;
156 int pde
= 0, try_restart
;
157 static u32_t lastv
= 0;
159 /* Input sanity check. */
160 vm_assert(vmin
+ I386_PAGE_SIZE
>= vmin
);
161 vm_assert(vmax
>= vmin
+ I386_PAGE_SIZE
);
162 vm_assert((vmin
% I386_PAGE_SIZE
) == 0);
163 vm_assert((vmax
% I386_PAGE_SIZE
) == 0);
/* Pick a random page-aligned starting point inside [vmin, vmax). */
166 curv
= ((u32_t
) random()) % ((vmax
- vmin
)/I386_PAGE_SIZE
);
167 curv
*= I386_PAGE_SIZE
;
171 if(curv
< vmin
|| curv
>= vmax
)
176 /* Start looking for a free page starting at vmin. */
180 vm_assert(curv
>= vmin
);
181 vm_assert(curv
< vmax
);
183 pde
= I386_VM_PDE(curv
);
184 pte
= I386_VM_PTE(curv
);
/* A slot is free when either its page table is absent or its PTE is
 * not present.
 */
186 if(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
) ||
187 !(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
)) {
192 curv
+=I386_PAGE_SIZE
;
/* Hit the top of the range: restart once from vmin. */
194 if(curv
>= vmax
&& try_restart
) {
200 printf("VM: out of virtual address space in vm\n");
205 /*===========================================================================*
207 *===========================================================================*/
/* Free 'pages' pages previously allocated for VM's own use at virtual
 * address 'vir' / physical address 'phys'.  Only pages above VM's stack
 * top are actually returned to the free list and unmapped; pages inside
 * VM's heap are left alone (just reported), since the heap cannot shrink.
 * 'reason' is a VMP_* allocation category used for sanity checking.
 */
208 PRIVATE
void vm_freepages(vir_bytes vir
, vir_bytes phys
, int pages
, int reason
)
210 vm_assert(reason
>= 0 && reason
< VMP_CATEGORIES
);
211 if(vir
>= vmp
->vm_stacktop
) {
212 vm_assert(!(vir
% I386_PAGE_SIZE
));
213 vm_assert(!(phys
% I386_PAGE_SIZE
));
214 FREE_MEM(ABS2CLICK(phys
), pages
);
/* Clear the mapping in VM's own page table. */
215 if(pt_writemap(&vmp
->vm_pt
, arch_vir2map(vmp
, vir
),
216 MAP_NONE
, pages
*I386_PAGE_SIZE
, 0, WMF_OVERWRITE
) != OK
)
217 panic("vm_freepages: pt_writemap failed");
219 printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
224 /*===========================================================================*
226 *===========================================================================*/
/* Hand out one of the pre-allocated spare pages: returns its virtual
 * address and stores its physical address in *phys, marking the slot
 * empty so vm_checkspares() will replenish it later.  Used when normal
 * allocation is impossible (e.g. while building page tables).
 */
227 PRIVATE
void *vm_getsparepage(u32_t
*phys
)
230 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
231 for(s
= 0; s
< SPAREPAGES
; s
++) {
232 if(sparepages
[s
].page
) {
234 sp
= sparepages
[s
].page
;
235 *phys
= sparepages
[s
].phys
;
/* Claim the slot; missing_spares tracks how many are gone. */
236 sparepages
[s
].page
= NULL
;
238 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
245 /*===========================================================================*
247 *===========================================================================*/
/* Replenish the spare-page pool: for every empty sparepages[] slot,
 * allocate a fresh page with vm_allocpage().  Keeps simple statistics
 * (total refilled, worst-case number missing at once) and warns when a
 * replacement page cannot be obtained.
 */
248 PRIVATE
void *vm_checkspares(void)
251 static int total
= 0, worst
= 0;
252 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
/* Only scan while spares are actually missing. */
253 for(s
= 0; s
< SPAREPAGES
&& missing_spares
> 0; s
++)
254 if(!sparepages
[s
].page
) {
256 if((sparepages
[s
].page
= vm_allocpage(&sparepages
[s
].phys
,
259 vm_assert(missing_spares
>= 0);
260 vm_assert(missing_spares
<= SPAREPAGES
);
262 printf("VM: warning: couldn't get new spare page\n");
265 if(worst
< n
) worst
= n
;
271 /*===========================================================================*
273 *===========================================================================*/
/* Allocate one page for VM's own use and map it into VM's address space.
 * Stores the physical address in *phys and returns the virtual address
 * (NULL-equivalent paths print a warning and bail out in the elided
 * lines).  'reason' is a VMP_* category.  Falls back to the spare-page
 * pool when called re-entrantly (level > 1), before VM has its own page
 * table (no VMF_HASPT), or before memory initialization is done.
 */
274 PUBLIC
void *vm_allocpage(phys_bytes
*phys
, int reason
)
276 /* Allocate a page for use by VM itself. */
281 static int level
= 0;
285 vm_assert(reason
>= 0 && reason
< VMP_CATEGORIES
);
/* Guard against deeper re-entrancy than the one level we allow
 * (pt_writemap below may call back into vm_allocpage via pt_ptalloc).
 */
289 vm_assert(level
>= 1);
290 vm_assert(level
<= 2);
292 if(level
> 1 || !(vmp
->vm_flags
& VMF_HASPT
) || !meminit_done
) {
295 s
=vm_getsparepage(phys
);
299 printf("VM: warning: out of spare pages\n");
304 /* VM does have a pagetable, so get a page and map it in there.
305 * Where in our virtual address space can we put it?
307 loc
= findhole(pt
, arch_vir2map(vmp
, vmp
->vm_stacktop
),
308 vmp
->vm_arch
.vm_data_top
);
311 printf("VM: vm_allocpage: findhole failed\n");
315 /* Allocate page of memory for use by VM. As VM
316 * is trusted, we don't have to pre-clear it.
318 if((newpage
= ALLOC_MEM(CLICKSPERPAGE
, 0)) == NO_MEM
) {
320 printf("VM: vm_allocpage: ALLOC_MEM failed\n");
324 *phys
= CLICK2ABS(newpage
);
326 /* Map this page into our address space. */
327 if((r
=pt_writemap(pt
, loc
, *phys
, I386_PAGE_SIZE
,
328 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
, 0)) != OK
) {
/* Mapping failed: give the physical page back. */
329 FREE_MEM(newpage
, CLICKSPERPAGE
);
330 printf("vm_allocpage writemap failed\n");
/* Make the new mapping visible to the CPU. */
335 if((r
=sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
336 panic("VMCTL_FLUSHTLB failed: %d", r
);
341 /* Return user-space-ready pointer to it. */
342 ret
= (void *) arch_map2vir(vmp
, loc
);
347 /*===========================================================================*
349 *===========================================================================*/
/* Toggle write protection on a page obtained from vm_allocpage():
 * lockflag nonzero leaves the page read-only (PRESENT|USER only),
 * lockflag zero adds I386_VM_WRITE back.  Only the PTE flags are
 * rewritten (WMF_WRITEFLAGSONLY); the TLB is flushed afterwards.
 */
350 PUBLIC
void vm_pagelock(void *vir
, int lockflag
)
352 /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
355 u32_t flags
= I386_VM_PRESENT
| I386_VM_USER
;
359 m
= arch_vir2map(vmp
, (vir_bytes
) vir
);
361 vm_assert(!(m
% I386_PAGE_SIZE
));
/* Unlocking: restore the write permission bit. */
364 flags
|= I386_VM_WRITE
;
/* Rewrite only the flags of the existing mapping. */
367 if((r
=pt_writemap(pt
, m
, 0, I386_PAGE_SIZE
,
368 flags
, WMF_OVERWRITE
| WMF_WRITEFLAGSONLY
)) != OK
) {
369 panic("vm_lockpage: pt_writemap failed");
372 if((r
=sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
373 panic("VMCTL_FLUSHTLB failed: %d", r
);
379 /*===========================================================================*
381 *===========================================================================*/
/* Allocate a new page table for directory slot 'pde' of 'pt', clear all
 * of its entries, and install its physical address into the directory
 * with PRESENT|USER|WRITE plus the caller's 'flags'.  Returns OK, or an
 * error (in the elided line) when the page allocation fails.
 */
382 PRIVATE
int pt_ptalloc(pt_t
*pt
, int pde
, u32_t flags
)
384 /* Allocate a page table and write its address into the page directory. */
388 /* Argument must make sense. */
389 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
390 vm_assert(!(flags
& ~(PTF_ALLFLAGS
)));
392 /* We don't expect to overwrite page directory entry, nor
393 * storage for the page table.
395 vm_assert(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
));
396 vm_assert(!pt
->pt_pt
[pde
]);
398 /* Get storage for the page table. */
399 if(!(pt
->pt_pt
[pde
] = vm_allocpage(&pt_phys
, VMP_PAGETABLE
)))
/* Clear all PTEs so nothing is accidentally mapped. */
402 for(i
= 0; i
< I386_VM_PT_ENTRIES
; i
++)
403 pt
->pt_pt
[pde
][i
] = 0; /* Empty entry. */
405 /* Make page directory entry.
406 * The PDE is always 'present,' 'writable,' and 'user accessible,'
407 * relying on the PTE for protection.
409 pt
->pt_dir
[pde
] = (pt_phys
& I386_VM_ADDR_MASK
) | flags
410 | I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
;
415 /*===========================================================================*
417 *===========================================================================*/
/* Map 'bytes' (page multiple) of physical memory at 'physaddr' into 'pt'
 * at virtual address 'v' with PTE 'flags'.  physaddr == MAP_NONE clears
 * the range.  writemapflags modify behavior: WMF_OVERWRITE allows
 * replacing present entries, WMF_WRITEFLAGSONLY keeps the existing frame
 * and rewrites flags, WMF_FREE releases the old frame, WMF_VERIFY checks
 * entries instead of writing them.  Missing page tables are allocated up
 * front so a failure leaves the table consistent.  Returns OK or an
 * error code (in elided lines).
 */
418 PUBLIC
int pt_writemap(pt_t
*pt
, vir_bytes v
, phys_bytes physaddr
,
419 size_t bytes
, u32_t flags
, u32_t writemapflags
)
421 /* Write mapping into page table. Allocate a new page table if necessary. */
422 /* Page directory and table entries for this virtual address. */
423 int p
, pages
, pdecheck
;
427 if(writemapflags
& WMF_VERIFY
)
430 vm_assert(!(bytes
% I386_PAGE_SIZE
));
431 vm_assert(!(flags
& ~(PTF_ALLFLAGS
)));
433 pages
= bytes
/ I386_PAGE_SIZE
;
435 /* MAP_NONE means to clear the mapping. It doesn't matter
436 * what's actually written into the PTE if I386_VM_PRESENT
437 * isn't on, so we can just write MAP_NONE into it.
440 if(physaddr
!= MAP_NONE
&& !(flags
& I386_VM_PRESENT
)) {
441 panic("pt_writemap: writing dir with !P");
443 if(physaddr
== MAP_NONE
&& flags
) {
444 panic("pt_writemap: writing 0 with flags");
/* Last directory entry touched by the range. */
448 finalpde
= I386_VM_PDE(v
+ I386_PAGE_SIZE
* pages
);
450 /* First make sure all the necessary page tables are allocated,
451 * before we start writing in any of them, because it's a pain
452 * to undo our work properly. Walk the range in page-directory-entry
455 for(pdecheck
= I386_VM_PDE(v
); pdecheck
<= finalpde
; pdecheck
++) {
456 vm_assert(pdecheck
>= 0 && pdecheck
< I386_VM_DIR_ENTRIES
);
/* 4MB mappings cannot be partially rewritten here. */
457 if(pt
->pt_dir
[pdecheck
] & I386_VM_BIGPAGE
) {
458 printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
460 panic("pt_writemap: BIGPAGE found");
462 if(!(pt
->pt_dir
[pdecheck
] & I386_VM_PRESENT
)) {
465 printf("pt_writemap verify: no pde %d\n", pdecheck
);
468 vm_assert(!pt
->pt_dir
[pdecheck
]);
469 if((r
=pt_ptalloc(pt
, pdecheck
, flags
)) != OK
) {
470 /* Couldn't do (complete) mapping.
471 * Don't bother freeing any previously
472 * allocated page tables, they're
473 * still writable, don't point to nonsense,
474 * and pt_ptalloc leaves the directory
475 * and other data in a consistent state.
/* NOTE(review): 'pdecheck' is passed with no matching format
 * specifier below - a stray printf argument; harmless on i386
 * varargs but should be dropped or given a "%d".
 */
477 printf("pt_writemap: pt_ptalloc failed\n", pdecheck
);
481 vm_assert(pt
->pt_dir
[pdecheck
] & I386_VM_PRESENT
);
484 /* Now write in them. */
485 for(p
= 0; p
< pages
; p
++) {
487 int pde
= I386_VM_PDE(v
);
488 int pte
= I386_VM_PTE(v
);
490 vm_assert(!(v
% I386_PAGE_SIZE
));
491 vm_assert(pte
>= 0 && pte
< I386_VM_PT_ENTRIES
);
492 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
494 /* Page table has to be there. */
495 vm_assert(pt
->pt_dir
[pde
] & I386_VM_PRESENT
);
497 /* Make sure page directory entry for this page table
498 * is marked present and page table entry is available.
500 vm_assert((pt
->pt_dir
[pde
] & I386_VM_PRESENT
) && pt
->pt_pt
[pde
]);
503 /* We don't expect to overwrite a page. */
504 if(!(writemapflags
& (WMF_OVERWRITE
|WMF_VERIFY
)))
505 vm_assert(!(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
));
/* Keep (or free) the frame currently mapped at this slot. */
507 if(writemapflags
& (WMF_WRITEFLAGSONLY
|WMF_FREE
)) {
508 physaddr
= pt
->pt_pt
[pde
][pte
] & I386_VM_ADDR_MASK
;
511 if(writemapflags
& WMF_FREE
) {
512 FREE_MEM(ABS2CLICK(physaddr
), 1);
515 /* Entry we will write. */
516 entry
= (physaddr
& I386_VM_ADDR_MASK
) | flags
;
/* Verify mode: compare ignoring CPU-set accessed/dirty bits. */
520 maskedentry
= pt
->pt_pt
[pde
][pte
];
521 maskedentry
&= ~(I386_VM_ACC
|I386_VM_DIRTY
);
522 /* Verify pagetable entry. */
523 if(maskedentry
!= entry
) {
524 printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
525 pt
->pt_pt
[pde
][pte
], maskedentry
, entry
);
529 /* Write pagetable entry. */
530 pt
->pt_pt
[pde
][pte
] = entry
;
/* Advance to the next page of the range. */
533 physaddr
+= I386_PAGE_SIZE
;
540 /*===========================================================================*
542 *===========================================================================*/
/* Check that the range [v, v+bytes) is fully mapped in 'pt', and - when
 * 'write' is nonzero - that every page is mapped writable.  Returns OK
 * or an error (in elided lines) without modifying the page table.
 */
543 PUBLIC
int pt_checkrange(pt_t
*pt
, vir_bytes v
, size_t bytes
,
548 vm_assert(!(bytes
% I386_PAGE_SIZE
));
550 pages
= bytes
/ I386_PAGE_SIZE
;
552 for(p
= 0; p
< pages
; p
++) {
554 int pde
= I386_VM_PDE(v
);
555 int pte
= I386_VM_PTE(v
);
557 vm_assert(!(v
% I386_PAGE_SIZE
));
558 vm_assert(pte
>= 0 && pte
< I386_VM_PT_ENTRIES
);
559 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
561 /* Page table has to be there. */
562 if(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
))
565 /* Make sure page directory entry for this page table
566 * is marked present and page table entry is available.
568 vm_assert((pt
->pt_dir
[pde
] & I386_VM_PRESENT
) && pt
->pt_pt
[pde
]);
/* Page itself must be present... */
570 if(!(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
)) {
/* ...and writable if the caller asked for write access. */
574 if(write
&& !(pt
->pt_pt
[pde
][pte
] & I386_VM_WRITE
)) {
584 /*===========================================================================*
586 *===========================================================================*/
/* Initialize page table 'pt': allocate a page-aligned page directory
 * (unless this slot already has one - see comment below), clear all
 * directory entries, and map in the kernel with pt_mapkernel().
 * Returns OK or an error (in elided lines).
 */
587 PUBLIC
int pt_new(pt_t
*pt
)
589 /* Allocate a pagetable root. On i386, allocate a page-aligned page directory
590 * and set them to 0 (indicating no page tables are allocated). Lookup
591 * its physical address as we'll need that in the future. Verify it's
596 /* Don't ever re-allocate/re-move a certain process slot's
597 * page directory once it's been created. This is a fraction
598 * faster, but also avoids having to invalidate the page
599 * mappings from in-kernel page tables pointing to
600 * the page directories (the page_directories data).
603 !(pt
->pt_dir
= vm_allocpage(&pt
->pt_dir_phys
, VMP_PAGEDIR
))) {
607 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++) {
608 pt
->pt_dir
[i
] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
612 /* Where to start looking for free virtual address space? */
616 if(pt_mapkernel(pt
) != OK
)
617 panic("pt_new: pt_mapkernel failed");
622 /*===========================================================================*
624 *===========================================================================*/
/* Build a 1:1 (identity) page table in 'pt' using 4MB big-page PDEs:
 * every directory entry i maps virtual address i*4MB to the same
 * physical address, present and writable.  Returns OK or an error
 * (in elided lines).
 */
625 PUBLIC
int pt_identity(pt_t
*pt
)
627 /* Allocate a pagetable that does a 1:1 mapping. */
630 /* Allocate page directory. */
632 !(pt
->pt_dir
= vm_allocpage(&pt
->pt_dir_phys
, VMP_PAGEDIR
))) {
636 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++) {
/* One 4MB PDE per directory slot: phys == virt. */
638 addr
= I386_BIG_PAGE_SIZE
*i
;
639 pt
->pt_dir
[i
] = (addr
& I386_VM_ADDR_MASK_4MB
) |
642 I386_VM_PRESENT
|I386_VM_WRITE
;
646 /* Where to start looking for free virtual address space? */
652 /*===========================================================================*
654 *===========================================================================*/
/* One-time paging setup for the VM process itself.  Steps, in order:
 * pre-allocate the spare-page pool; probe CPU features (PGE, PSE);
 * compute the identity-mapped PDE range from 'usedlimit'; build a
 * private page table (moving VM's own segments up to VM_PROCSTART if
 * needed); allocate the page-directories table; grow the data segment to
 * VM_DATATOP; install kernel-requested mappings; hand free PDEs, the
 * kernel limit, and the page-directory location to the kernel; and
 * finally enable paging.
 */
655 PUBLIC
void pt_init(phys_bytes usedlimit
)
657 /* By default, the kernel gives us a data segment with pre-allocated
658 * memory that then can't grow. We want to be able to allocate memory
659 * dynamically, however. So here we copy the part of the page table
660 * that's ours, so we get a private page table. Then we increase the
661 * hardware segment size so we can allocate memory above our stack.
665 vir_bytes v
, kpagedir
;
667 vir_bytes extra_clicks
;
669 int global_bit_ok
= 0;
673 vir_bytes sparepages_mem
;
674 phys_bytes sparepages_ph
;
680 /* Get ourselves spare pages. */
681 if(!(sparepages_mem
= (vir_bytes
) aalloc(I386_PAGE_SIZE
*SPAREPAGES
)))
682 panic("pt_init: aalloc for spare failed");
/* Look up the physical address of the spare-page region. */
683 if((r
=sys_umap(SELF
, VM_D
, (vir_bytes
) sparepages_mem
,
684 I386_PAGE_SIZE
*SPAREPAGES
, &sparepages_ph
)) != OK
)
685 panic("pt_init: sys_umap failed: %d", r
);
687 for(s
= 0; s
< SPAREPAGES
; s
++) {
688 sparepages
[s
].page
= (void *) (sparepages_mem
+ s
*I386_PAGE_SIZE
);
689 sparepages
[s
].phys
= sparepages_ph
+ s
*I386_PAGE_SIZE
;
694 /* global bit and 4MB pages available? */
695 global_bit_ok
= _cpufeature(_CPUF_I386_PGE
);
696 bigpage_ok
= _cpufeature(_CPUF_I386_PSE
);
698 /* Set bit for PTE's and PDE's if available. */
700 global_bit
= I386_VM_GLOBAL
;
702 /* The kernel and boot time processes need an identity mapping.
703 * We use full PDE's for this without separate page tables.
704 * Figure out which pde we can start using for other purposes.
706 id_map_high_pde
= usedlimit
/ I386_BIG_PAGE_SIZE
;
708 /* We have to make mappings up till here. */
709 free_pde
= id_map_high_pde
+1;
711 /* Initial (current) range of our virtual address space. */
712 lo
= CLICK2ABS(vmp
->vm_arch
.vm_seg
[T
].mem_phys
);
713 hi
= CLICK2ABS(vmp
->vm_arch
.vm_seg
[S
].mem_phys
+
714 vmp
->vm_arch
.vm_seg
[S
].mem_len
);
716 vm_assert(!(lo
% I386_PAGE_SIZE
));
717 vm_assert(!(hi
% I386_PAGE_SIZE
));
/* If VM currently lives below VM_PROCSTART, plan to move it up. */
719 if(lo
< VM_PROCSTART
) {
720 moveup
= VM_PROCSTART
- lo
;
721 vm_assert(!(VM_PROCSTART
% I386_PAGE_SIZE
));
722 vm_assert(!(lo
% I386_PAGE_SIZE
));
723 vm_assert(!(moveup
% I386_PAGE_SIZE
));
726 /* Make new page table for ourselves, partly copied
727 * from the current one.
729 if(pt_new(newpt
) != OK
)
730 panic("pt_init: pt_new failed");
732 /* Set up mappings for VM process. */
733 for(v
= lo
; v
< hi
; v
+= I386_PAGE_SIZE
) {
737 /* We have to write the new position in the PT,
738 * so we can move our segments.
740 if(pt_writemap(newpt
, v
+moveup
, v
, I386_PAGE_SIZE
,
741 I386_VM_PRESENT
|I386_VM_WRITE
|I386_VM_USER
, 0) != OK
)
742 panic("pt_init: pt_writemap failed");
745 /* Move segments up too. */
746 vmp
->vm_arch
.vm_seg
[T
].mem_phys
+= ABS2CLICK(moveup
);
747 vmp
->vm_arch
.vm_seg
[D
].mem_phys
+= ABS2CLICK(moveup
);
748 vmp
->vm_arch
.vm_seg
[S
].mem_phys
+= ABS2CLICK(moveup
);
750 /* Allocate us a page table in which to remember page directory
753 if(!(page_directories
= vm_allocpage(&page_directories_phys
,
755 panic("no virt addr for vm mappings");
757 memset(page_directories
, 0, I386_PAGE_SIZE
);
759 /* Increase our hardware data segment to create virtual address
760 * space above our stack. We want to increase it to VM_DATATOP,
761 * like regular processes have.
763 extra_clicks
= ABS2CLICK(VM_DATATOP
- hi
);
764 vmp
->vm_arch
.vm_seg
[S
].mem_len
+= extra_clicks
;
766 /* We pretend to the kernel we have a huge stack segment to
767 * increase our data segment.
769 vmp
->vm_arch
.vm_data_top
=
770 (vmp
->vm_arch
.vm_seg
[S
].mem_vir
+
771 vmp
->vm_arch
.vm_seg
[S
].mem_len
) << CLICK_SHIFT
;
773 /* Where our free virtual address space starts.
774 * This is only a hint to the VM system.
776 newpt
->pt_virtop
= 0;
778 /* Let other functions know VM now has a private page table. */
779 vmp
->vm_flags
|= VMF_HASPT
;
781 /* Now reserve another pde for kernel's own mappings. */
784 phys_bytes addr
, len
;
785 int flags
, index
= 0;
788 kernmap_pde
= free_pde
++;
789 offset
= kernmap_pde
* I386_BIG_PAGE_SIZE
;
/* Collect every mapping the kernel wants, record it in
 * kern_mappings[] and report the chosen virtual address back.
 */
791 while(sys_vmctl_get_mapping(index
, &addr
, &len
,
794 if(index
>= MAX_KERNMAPPINGS
)
795 panic("VM: too many kernel mappings: %d", index
);
796 kern_mappings
[index
].phys_addr
= addr
;
797 kern_mappings
[index
].len
= len
;
798 kern_mappings
[index
].flags
= flags
;
799 kern_mappings
[index
].lin_addr
= offset
;
800 kern_mappings
[index
].flags
=
801 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
|
/* Uncached device mappings get write-through + cache-disable. */
803 if(flags
& VMMF_UNCACHED
)
804 kern_mappings
[index
].flags
|=
805 I386_VM_PWT
| I386_VM_PCD
;
806 if(addr
% I386_PAGE_SIZE
)
807 panic("VM: addr unaligned: %d", addr
);
808 if(len
% I386_PAGE_SIZE
)
809 panic("VM: len unaligned: %d", len
);
810 vir
= arch_map2vir(&vmproc
[VMP_SYSTEM
], offset
);
811 if(sys_vmctl_reply_mapping(index
, vir
) != OK
)
812 panic("VM: reply failed");
819 /* Find a PDE below processes available for mapping in the
820 * page directories (readonly).
822 pagedir_pde
= free_pde
++;
823 pagedir_pde_val
= (page_directories_phys
& I386_VM_ADDR_MASK
) |
824 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
;
826 /* Tell kernel about free pde's. */
827 while(free_pde
*I386_BIG_PAGE_SIZE
< VM_PROCSTART
) {
828 if((r
=sys_vmctl(SELF
, VMCTL_I386_FREEPDE
, free_pde
++)) != OK
) {
829 panic("VMCTL_I386_FREEPDE failed: %d", r
);
833 /* first pde in use by process. */
836 kernlimit
= free_pde
*I386_BIG_PAGE_SIZE
;
838 /* Increase kernel segment to address this memory. */
839 if((r
=sys_vmctl(SELF
, VMCTL_I386_KERNELLIMIT
, kernlimit
)) != OK
) {
840 panic("VMCTL_I386_KERNELLIMIT failed: %d", r
);
843 kpagedir
= arch_map2vir(&vmproc
[VMP_SYSTEM
],
844 pagedir_pde
*I386_BIG_PAGE_SIZE
);
846 /* Tell kernel how to get at the page directories. */
847 if((r
=sys_vmctl(SELF
, VMCTL_I386_PAGEDIRS
, kpagedir
)) != OK
) {
848 panic("VMCTL_I386_PAGEDIRS failed: %d", r
);
851 /* Give our process the new, copied, private page table. */
852 pt_mapkernel(newpt
); /* didn't know about vm_dir pages earlier */
855 /* Now actually enable paging. */
856 if(sys_vmctl_enable_paging(vmp
->vm_arch
.vm_seg
) != OK
)
857 panic("pt_init: enable paging failed");
859 /* Back to reality - this is where the stack actually is. */
860 vmp
->vm_arch
.vm_seg
[S
].mem_len
-= extra_clicks
;
867 /*===========================================================================*
869 *===========================================================================*/
/* Bind page table 'pt' to process 'who': record its page directory's
 * physical address (present+writable) in the global page_directories
 * table at the process's slot, then ask the kernel to load it as the
 * process's CR3 via VMCTL_I386_SETCR3.  Returns the sys_vmctl() result.
 */
870 PUBLIC
int pt_bind(pt_t
*pt
, struct vmproc
*who
)
875 /* Basic sanity checks. */
877 vm_assert(who
->vm_flags
& VMF_INUSE
);
881 vm_assert(slot
>= 0);
882 vm_assert(slot
< ELEMENTS(vmproc
));
/* One PTE per slot in the page-directories page, so the slot count
 * must fit in a single page table.
 */
883 vm_assert(slot
< I386_VM_PT_ENTRIES
);
/* The directory's physical address must be page-aligned already. */
885 phys
= pt
->pt_dir_phys
& I386_VM_ADDR_MASK
;
886 vm_assert(pt
->pt_dir_phys
== phys
);
888 /* Update "page directory pagetable." */
889 page_directories
[slot
] = phys
| I386_VM_PRESENT
|I386_VM_WRITE
;
892 printf("VM: slot %d has pde val 0x%lx\n", slot
, page_directories
[slot
]);
894 /* Tell kernel about new page table root. */
895 return sys_vmctl(who
->vm_endpoint
, VMCTL_I386_SETCR3
,
896 pt
? pt
->pt_dir_phys
: 0);
899 /*===========================================================================*
901 *===========================================================================*/
/* Release all page-table pages of 'pt': for each directory entry, free
 * the in-VM page-table page (virtual address pt_pt[i], physical frame
 * taken from the PDE) back via vm_freepages().  The directory itself is
 * kept (see pt_new: directories are never re-allocated per slot).
 */
902 PUBLIC
void pt_free(pt_t
*pt
)
904 /* Free memory associated with this pagetable. */
907 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++)
909 vm_freepages((vir_bytes
) pt
->pt_pt
[i
],
910 I386_VM_PFA(pt
->pt_dir
[i
]), 1, VMP_PAGETABLE
);
915 /*===========================================================================*
917 *===========================================================================*/
/* Install the mappings every page table must contain: identity-map the
 * kernel area with 4MB big-page PDEs (0..id_map_high_pde), expose the
 * page-directories table at pagedir_pde when it has been set up, and
 * replay all kernel-requested mappings from kern_mappings[].  Requires
 * PSE (bigpage_ok); panics otherwise.  Returns OK on success.
 */
918 PUBLIC
int pt_mapkernel(pt_t
*pt
)
922 /* Any i386 page table needs to map in the kernel address space. */
923 vm_assert(vmproc
[VMP_SYSTEM
].vm_flags
& VMF_INUSE
);
/* Identity-map the kernel range with one 4MB PDE per slot. */
927 for(pde
= 0; pde
<= id_map_high_pde
; pde
++) {
929 addr
= pde
* I386_BIG_PAGE_SIZE
;
930 vm_assert((addr
& I386_VM_ADDR_MASK
) == addr
);
931 pt
->pt_dir
[pde
] = addr
| I386_VM_PRESENT
|
932 I386_VM_BIGPAGE
| I386_VM_USER
|
933 I386_VM_WRITE
| global_bit
;
936 panic("VM: pt_mapkernel: no bigpage");
939 if(pagedir_pde
>= 0) {
940 /* Kernel also wants to know about all page directories. */
941 pt
->pt_dir
[pagedir_pde
] = pagedir_pde_val
;
/* Replay the kernel's device/extra mappings into this table. */
944 for(i
= 0; i
< kernmappings
; i
++) {
946 kern_mappings
[i
].lin_addr
,
947 kern_mappings
[i
].phys_addr
,
948 kern_mappings
[i
].len
,
949 kern_mappings
[i
].flags
, 0) != OK
) {
950 panic("pt_mapkernel: pt_writemap failed");
957 /*===========================================================================*
959 *===========================================================================*/
960 PUBLIC
void pt_cycle(void)