3 #define _POSIX_SOURCE 1
5 #include <minix/callnr.h>
7 #include <minix/config.h>
8 #include <minix/const.h>
10 #include <minix/endpoint.h>
11 #include <minix/keymap.h>
12 #include <minix/minlib.h>
13 #include <minix/type.h>
14 #include <minix/ipc.h>
15 #include <minix/sysutil.h>
16 #include <minix/syslib.h>
17 #include <minix/safecopies.h>
18 #include <minix/cpufeature.h>
19 #include <minix/bitmap.h>
34 #include "../sanitycheck.h"
/* NOTE(review): this extract is elided (original line numbers are fused into
 * the text and several lines are missing); struct definitions for sparepages
 * and kern_mappings are only partially visible — verify against full source. */
38 /* PDE used to map in kernel, kernel physical address. */
39 PRIVATE
int id_map_high_pde
= -1, pagedir_pde
= -1;
/* global_bit: PTE/PDE global flag, set in pt_init when PGE is available. */
40 PRIVATE u32_t global_bit
= 0, pagedir_pde_val
;
42 PRIVATE
int proc_pde
= 0;
44 /* 4MB page size available in hardware? */
45 PRIVATE
int bigpage_ok
= 0;
47 /* Our process table entry. */
48 struct vmproc
*vmp
= &vmproc
[VM_PROC_NR
];
50 /* Spare memory, ready to go after initialization, to avoid a
51 * circular dependency on allocating memory and writing it into VM's
55 int missing_spares
= SPAREPAGES
;
59 } sparepages
[SPAREPAGES
];
/* Fixed cap on kernel-supplied mappings collected in pt_init. */
61 #define MAX_KERNMAPPINGS 10
63 phys_bytes phys_addr
; /* Physical addr. */
64 phys_bytes len
; /* Length in bytes. */
65 vir_bytes lin_addr
; /* Offset in page table. */
67 } kern_mappings
[MAX_KERNMAPPINGS
];
/* NOTE(review): extract is elided; the #endif matching the #if below is not
 * visible here — confirm against full source. */
70 /* Clicks must be pages, as
71 * - they must be page aligned to map them
72 * - they must be a multiple of the page size
73 * - it's inconvenient to have them bigger than pages, because we often want
75 * May as well require them to be equal then.
77 #if CLICK_SIZE != I386_PAGE_SIZE
78 #error CLICK_SIZE must be page size.
81 /* Bytes of virtual address space one pde controls. */
82 #define BYTESPERPDE (I386_VM_PT_ENTRIES * I386_PAGE_SIZE)
84 /* Nevertheless, introduce these macros to make the code readable. */
85 #define CLICK2PAGE(c) ((c) / CLICKSPERPAGE)
87 /* Page table that contains pointers to all page directories. */
88 u32_t page_directories_phys
, *page_directories
= NULL
;
/* pt_sanitycheck: debug-time consistency check of a page table 'pt'.
 * Verifies the directory pointer/phys address are set, that 'pt' belongs to
 * some vmproc slot, registers used pages via usedpages_add, and checks each
 * present PDE's frame. NOTE(review): extract elides several lines (loop
 * braces, declarations of slot/i) — verify control flow in full source. */
91 /*===========================================================================*
93 *===========================================================================*/
94 PUBLIC
void pt_sanitycheck(pt_t
*pt
, char *file
, int line
)
96 /* Basic pt sanity check. */
101 MYASSERT(pt
->pt_dir
);
102 MYASSERT(pt
->pt_dir_phys
);
104 for(slot
= 0; slot
< ELEMENTS(vmproc
); slot
++) {
105 if(pt
== &vmproc
[slot
].vm_pt
)
109 if(slot
>= ELEMENTS(vmproc
)) {
110 vm_panic("pt_sanitycheck: passed pt not in any proc", NO_NUM
);
113 MYASSERT(usedpages_add(pt
->pt_dir_phys
, I386_PAGE_SIZE
) == OK
);
115 for(i
= proc_pde
; i
< I386_VM_DIR_ENTRIES
; i
++) {
117 if(!(pt
->pt_dir
[i
] & I386_VM_PRESENT
)) {
118 printf("slot %d: pt->pt_pt[%d] = 0x%lx, but pt_dir entry 0x%lx\n",
119 slot
, i
, pt
->pt_pt
[i
], pt
->pt_dir
[i
]);
121 MYASSERT(pt
->pt_dir
[i
] & I386_VM_PRESENT
);
122 MYASSERT(usedpages_add(I386_VM_PFA(pt
->pt_dir
[i
]),
123 I386_PAGE_SIZE
) == OK
);
125 MYASSERT(!(pt
->pt_dir
[i
] & I386_VM_PRESENT
));
/* aalloc: page-aligned allocation via malloc, over-allocating one page and
 * rounding the pointer up to the next page boundary. The excess is never
 * freed — presumably acceptable for boot-time use only; verify in full
 * source. Panics on OOM. NOTE(review): return statement elided in extract. */
131 /*===========================================================================*
133 *===========================================================================*/
134 PRIVATE
void *aalloc(size_t bytes
)
136 /* Page-aligned malloc(). only used if vm_allocpage can't be used. */
139 b
= (u32_t
) malloc(I386_PAGE_SIZE
+ bytes
);
140 if(!b
) vm_panic("aalloc: out of memory", bytes
);
141 b
+= I386_PAGE_SIZE
- (b
% I386_PAGE_SIZE
);
/* findhole: search [vmin, vmax) of page table 'pt' for a page-sized unmapped
 * slot; starts at a random page offset and scans forward, apparently
 * restarting once at vmin (try_restart) before giving up. Returns a byte
 * offset. NOTE(review): extract elides the loop structure, the success
 * return, and the declaration of pte — confirm against full source. */
146 /*===========================================================================*
148 *===========================================================================*/
149 PRIVATE u32_t
findhole(pt_t
*pt
, u32_t vmin
, u32_t vmax
)
151 /* Find a space in the virtual address space of pageteble 'pt',
152 * between page-aligned BYTE offsets vmin and vmax, to fit
153 * a page in. Return byte offset.
155 u32_t freefound
= 0, curv
;
156 int pde
= 0, try_restart
;
157 static u32_t lastv
= 0;
159 /* Input sanity check. */
160 vm_assert(vmin
+ I386_PAGE_SIZE
>= vmin
);
161 vm_assert(vmax
>= vmin
+ I386_PAGE_SIZE
);
162 vm_assert((vmin
% I386_PAGE_SIZE
) == 0);
163 vm_assert((vmax
% I386_PAGE_SIZE
) == 0);
/* Randomize the starting point within the window, then align to a page. */
166 curv
= ((u32_t
) random()) % ((vmax
- vmin
)/I386_PAGE_SIZE
);
167 curv
*= I386_PAGE_SIZE
;
171 if(curv
< vmin
|| curv
>= vmax
)
176 /* Start looking for a free page starting at vmin. */
180 vm_assert(curv
>= vmin
);
181 vm_assert(curv
< vmax
);
183 pde
= I386_VM_PDE(curv
);
184 pte
= I386_VM_PTE(curv
);
/* A hole is a non-present PDE or a non-present PTE under a present PDE. */
186 if(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
) ||
187 !(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
)) {
192 curv
+=I386_PAGE_SIZE
;
194 if(curv
>= vmax
&& try_restart
) {
200 printf("VM: out of virtual address space in vm\n");
/* vm_freepages: return 'pages' pages at (vir, phys) to the free list and
 * unmap them from VM's own page table — but only for pages above VM's stack
 * top; heap pages are left mapped (see the printf below). 'reason' is a
 * VMP_* accounting category. NOTE(review): extract elides the panic argument
 * and the else branch's trailing lines — verify in full source. */
205 /*===========================================================================*
207 *===========================================================================*/
208 PRIVATE
void vm_freepages(vir_bytes vir
, vir_bytes phys
, int pages
, int reason
)
210 vm_assert(reason
>= 0 && reason
< VMP_CATEGORIES
);
211 if(vir
>= vmp
->vm_stacktop
) {
212 vm_assert(!(vir
% I386_PAGE_SIZE
));
213 vm_assert(!(phys
% I386_PAGE_SIZE
));
214 FREE_MEM(ABS2CLICK(phys
), pages
);
/* Clear the mapping by writing MAP_NONE with no flags. */
215 if(pt_writemap(&vmp
->vm_pt
, arch_vir2map(vmp
, vir
),
216 MAP_NONE
, pages
*I386_PAGE_SIZE
, 0, WMF_OVERWRITE
) != OK
)
217 vm_panic("vm_freepages: pt_writemap failed",
220 printf("VM: vm_freepages not freeing VM heap pages (%d)\n",
/* vm_getsparepage: hand out one pre-allocated spare page. Scans the
 * sparepages array for a non-NULL entry, returns its virtual address and
 * stores its physical address in *phys, then marks the slot empty.
 * NOTE(review): the missing_spares++ update and the NULL-on-exhaustion
 * return are elided in this extract — verify in full source. */
225 /*===========================================================================*
227 *===========================================================================*/
228 PRIVATE
void *vm_getsparepage(u32_t
*phys
)
231 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
232 for(s
= 0; s
< SPAREPAGES
; s
++) {
233 if(sparepages
[s
].page
) {
235 sp
= sparepages
[s
].page
;
236 *phys
= sparepages
[s
].phys
;
237 sparepages
[s
].page
= NULL
;
239 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
/* vm_checkspares: replenish the spare-page pool by allocating a fresh page
 * (vm_allocpage) for every empty sparepages slot while spares are missing.
 * Keeps static 'total'/'worst' counters — presumably replenishment
 * statistics; the code using 'n' and 'total' is elided here, verify in
 * full source. */
246 /*===========================================================================*
248 *===========================================================================*/
249 PRIVATE
void *vm_checkspares(void)
252 static int total
= 0, worst
= 0;
253 vm_assert(missing_spares
>= 0 && missing_spares
<= SPAREPAGES
);
254 for(s
= 0; s
< SPAREPAGES
&& missing_spares
> 0; s
++)
255 if(!sparepages
[s
].page
) {
257 if((sparepages
[s
].page
= vm_allocpage(&sparepages
[s
].phys
,
260 vm_assert(missing_spares
>= 0);
261 vm_assert(missing_spares
<= SPAREPAGES
);
263 printf("VM: warning: couldn't get new spare page\n");
266 if(worst
< n
) worst
= n
;
/* vm_allocpage: allocate one page for VM's own use and map it into VM's
 * address space; stores the physical address in *phys and returns a
 * user-space virtual pointer. Falls back to a spare page when re-entered
 * (level > 1), when VM has no private page table yet, or before meminit.
 * 'reason' is a VMP_* accounting category. NOTE(review): level++/level--
 * bookkeeping, 'pt' initialization, and several returns are elided in this
 * extract — verify in full source. Also note line 331 passes 'ret' to a
 * printf with no conversion specifier — looks like a latent bug; confirm. */
272 /*===========================================================================*
274 *===========================================================================*/
275 PUBLIC
void *vm_allocpage(phys_bytes
*phys
, int reason
)
277 /* Allocate a page for use by VM itself. */
282 static int level
= 0;
286 vm_assert(reason
>= 0 && reason
< VMP_CATEGORIES
);
290 vm_assert(level
>= 1);
291 vm_assert(level
<= 2);
/* Re-entrant call or no private pagetable yet: use a spare page. */
293 if(level
> 1 || !(vmp
->vm_flags
& VMF_HASPT
) || !meminit_done
) {
296 s
=vm_getsparepage(phys
);
300 printf("VM: warning: out of spare pages\n");
305 /* VM does have a pagetable, so get a page and map it in there.
306 * Where in our virtual address space can we put it?
308 loc
= findhole(pt
, arch_vir2map(vmp
, vmp
->vm_stacktop
),
309 vmp
->vm_arch
.vm_data_top
);
312 printf("VM: vm_allocpage: findhole failed\n");
316 /* Allocate page of memory for use by VM. As VM
317 * is trusted, we don't have to pre-clear it.
319 if((newpage
= ALLOC_MEM(CLICKSPERPAGE
, 0)) == NO_MEM
) {
321 printf("VM: vm_allocpage: ALLOC_MEM failed\n");
325 *phys
= CLICK2ABS(newpage
);
327 /* Map this page into our address space. */
328 if((r
=pt_writemap(pt
, loc
, *phys
, I386_PAGE_SIZE
,
329 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
, 0)) != OK
) {
330 FREE_MEM(newpage
, CLICKSPERPAGE
);
331 printf("vm_allocpage writemap failed\n", ret
);
/* Flush the TLB so the new mapping takes effect immediately. */
336 if((r
=sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
337 vm_panic("VMCTL_FLUSHTLB failed", r
);
342 /* Return user-space-ready pointer to it. */
343 ret
= (void *) arch_map2vir(vmp
, loc
);
/* vm_pagelock: toggle write permission on one VM-owned page (allocated via
 * vm_allocpage). With lockflag set the page stays read-only (PRESENT|USER);
 * otherwise I386_VM_WRITE is added — the conditional guarding line 365 is
 * elided in this extract, verify in full source. Rewrites only the flags
 * (WMF_WRITEFLAGSONLY) and flushes the TLB. */
348 /*===========================================================================*
350 *===========================================================================*/
351 PUBLIC
void vm_pagelock(void *vir
, int lockflag
)
353 /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
356 u32_t flags
= I386_VM_PRESENT
| I386_VM_USER
;
360 m
= arch_vir2map(vmp
, (vir_bytes
) vir
);
362 vm_assert(!(m
% I386_PAGE_SIZE
));
365 flags
|= I386_VM_WRITE
;
368 if((r
=pt_writemap(pt
, m
, 0, I386_PAGE_SIZE
,
369 flags
, WMF_OVERWRITE
| WMF_WRITEFLAGSONLY
)) != OK
) {
370 vm_panic("vm_lockpage: pt_writemap failed\n", NO_NUM
);
373 if((r
=sys_vmctl(SELF
, VMCTL_FLUSHTLB
, 0)) != OK
) {
374 vm_panic("VMCTL_FLUSHTLB failed", r
);
/* pt_ptalloc: allocate a fresh page table for directory slot 'pde' of 'pt',
 * zero all its entries, and install its physical address in the PDE with
 * PRESENT|USER|WRITE plus caller 'flags'. Asserts the slot was previously
 * empty. NOTE(review): declarations of pt_phys/i and the OK return are
 * elided in this extract — verify in full source. */
380 /*===========================================================================*
382 *===========================================================================*/
383 PRIVATE
int pt_ptalloc(pt_t
*pt
, int pde
, u32_t flags
)
385 /* Allocate a page table and write its address into the page directory. */
389 /* Argument must make sense. */
390 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
391 vm_assert(!(flags
& ~(PTF_ALLFLAGS
)));
393 /* We don't expect to overwrite page directory entry, nor
394 * storage for the page table.
396 vm_assert(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
));
397 vm_assert(!pt
->pt_pt
[pde
]);
399 /* Get storage for the page table. */
400 if(!(pt
->pt_pt
[pde
] = vm_allocpage(&pt_phys
, VMP_PAGETABLE
)))
403 for(i
= 0; i
< I386_VM_PT_ENTRIES
; i
++)
404 pt
->pt_pt
[pde
][i
] = 0; /* Empty entry. */
406 /* Make page directory entry.
407 * The PDE is always 'present,' 'writable,' and 'user accessible,'
408 * relying on the PTE for protection.
410 pt
->pt_dir
[pde
] = (pt_phys
& I386_VM_ADDR_MASK
) | flags
411 | I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
;
/* pt_writemap: map 'bytes' (page multiple) starting at virtual address 'v'
 * in page table 'pt' to physical 'physaddr' with PTE 'flags'. Two passes:
 * (1) ensure every page table in the PDE range exists (pt_ptalloc),
 * refusing BIGPAGE directory entries; (2) write each PTE. writemapflags:
 * WMF_OVERWRITE allows replacing present PTEs, WMF_WRITEFLAGSONLY keeps the
 * existing frame address, WMF_FREE frees the old frame, WMF_VERIFY compares
 * instead of writing (masking ACC/DIRTY bits). MAP_NONE clears mappings.
 * NOTE(review): extract elides the 'verify' flag variable, the v increment,
 * returns, and several closing braces — verify control flow in full source. */
416 /*===========================================================================*
418 *===========================================================================*/
419 PUBLIC
int pt_writemap(pt_t
*pt
, vir_bytes v
, phys_bytes physaddr
,
420 size_t bytes
, u32_t flags
, u32_t writemapflags
)
422 /* Write mapping into page table. Allocate a new page table if necessary. */
423 /* Page directory and table entries for this virtual address. */
424 int p
, pages
, pdecheck
;
428 if(writemapflags
& WMF_VERIFY
)
431 vm_assert(!(bytes
% I386_PAGE_SIZE
));
432 vm_assert(!(flags
& ~(PTF_ALLFLAGS
)));
434 pages
= bytes
/ I386_PAGE_SIZE
;
436 /* MAP_NONE means to clear the mapping. It doesn't matter
437 * what's actually written into the PTE if I386_VM_PRESENT
438 * isn't on, so we can just write MAP_NONE into it.
441 if(physaddr
!= MAP_NONE
&& !(flags
& I386_VM_PRESENT
)) {
442 vm_panic("pt_writemap: writing dir with !P\n", NO_NUM
);
444 if(physaddr
== MAP_NONE
&& flags
) {
445 vm_panic("pt_writemap: writing 0 with flags\n", NO_NUM
);
449 finalpde
= I386_VM_PDE(v
+ I386_PAGE_SIZE
* pages
);
451 /* First make sure all the necessary page tables are allocated,
452 * before we start writing in any of them, because it's a pain
453 * to undo our work properly. Walk the range in page-directory-entry
456 for(pdecheck
= I386_VM_PDE(v
); pdecheck
<= finalpde
; pdecheck
++) {
457 vm_assert(pdecheck
>= 0 && pdecheck
< I386_VM_DIR_ENTRIES
);
458 if(pt
->pt_dir
[pdecheck
] & I386_VM_BIGPAGE
) {
459 printf("pt_writemap: trying to write 0x%lx into 0x%lx\n",
461 vm_panic("pt_writemap: BIGPAGE found", NO_NUM
);
463 if(!(pt
->pt_dir
[pdecheck
] & I386_VM_PRESENT
)) {
466 printf("pt_writemap verify: no pde %d\n", pdecheck
);
469 vm_assert(!pt
->pt_dir
[pdecheck
]);
470 if((r
=pt_ptalloc(pt
, pdecheck
, flags
)) != OK
) {
471 /* Couldn't do (complete) mapping.
472 * Don't bother freeing any previously
473 * allocated page tables, they're
474 * still writable, don't point to nonsense,
475 * and pt_ptalloc leaves the directory
476 * and other data in a consistent state.
478 printf("pt_writemap: pt_ptalloc failed\n", pdecheck
);
482 vm_assert(pt
->pt_dir
[pdecheck
] & I386_VM_PRESENT
);
485 /* Now write in them. */
486 for(p
= 0; p
< pages
; p
++) {
488 int pde
= I386_VM_PDE(v
);
489 int pte
= I386_VM_PTE(v
);
491 vm_assert(!(v
% I386_PAGE_SIZE
));
492 vm_assert(pte
>= 0 && pte
< I386_VM_PT_ENTRIES
);
493 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
495 /* Page table has to be there. */
496 vm_assert(pt
->pt_dir
[pde
] & I386_VM_PRESENT
);
498 /* Make sure page directory entry for this page table
499 * is marked present and page table entry is available.
501 vm_assert((pt
->pt_dir
[pde
] & I386_VM_PRESENT
) && pt
->pt_pt
[pde
]);
504 /* We don't expect to overwrite a page. */
505 if(!(writemapflags
& (WMF_OVERWRITE
|WMF_VERIFY
)))
506 vm_assert(!(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
));
508 if(writemapflags
& (WMF_WRITEFLAGSONLY
|WMF_FREE
)) {
509 physaddr
= pt
->pt_pt
[pde
][pte
] & I386_VM_ADDR_MASK
;
512 if(writemapflags
& WMF_FREE
) {
513 FREE_MEM(ABS2CLICK(physaddr
), 1);
516 /* Entry we will write. */
517 entry
= (physaddr
& I386_VM_ADDR_MASK
) | flags
;
521 maskedentry
= pt
->pt_pt
[pde
][pte
];
522 maskedentry
&= ~(I386_VM_ACC
|I386_VM_DIRTY
);
523 /* Verify pagetable entry. */
524 if(maskedentry
!= entry
) {
525 printf("pt_writemap: 0x%lx found, masked 0x%lx, 0x%lx expected\n",
526 pt
->pt_pt
[pde
][pte
], maskedentry
, entry
);
530 /* Write pagetable entry. */
531 pt
->pt_pt
[pde
][pte
] = entry
;
534 physaddr
+= I386_PAGE_SIZE
;
/* pt_checkrange: verify that the 'bytes'-long range at 'v' is fully mapped
 * in 'pt' (present PDE and PTE per page) and, when 'write' is set, that
 * each page is writable. NOTE(review): the 'write' parameter, the loop's v
 * increment, and the EFAULT/OK returns are elided in this extract — verify
 * in full source. */
541 /*===========================================================================*
543 *===========================================================================*/
544 PUBLIC
int pt_checkrange(pt_t
*pt
, vir_bytes v
, size_t bytes
,
549 vm_assert(!(bytes
% I386_PAGE_SIZE
));
551 pages
= bytes
/ I386_PAGE_SIZE
;
553 for(p
= 0; p
< pages
; p
++) {
555 int pde
= I386_VM_PDE(v
);
556 int pte
= I386_VM_PTE(v
);
558 vm_assert(!(v
% I386_PAGE_SIZE
));
559 vm_assert(pte
>= 0 && pte
< I386_VM_PT_ENTRIES
);
560 vm_assert(pde
>= 0 && pde
< I386_VM_DIR_ENTRIES
);
562 /* Page table has to be there. */
563 if(!(pt
->pt_dir
[pde
] & I386_VM_PRESENT
))
566 /* Make sure page directory entry for this page table
567 * is marked present and page table entry is available.
569 vm_assert((pt
->pt_dir
[pde
] & I386_VM_PRESENT
) && pt
->pt_pt
[pde
]);
571 if(!(pt
->pt_pt
[pde
][pte
] & I386_VM_PRESENT
)) {
575 if(write
&& !(pt
->pt_pt
[pde
][pte
] & I386_VM_WRITE
)) {
/* pt_new: initialize a pagetable root: allocate a page directory (unless
 * this slot already has one — see comment below), clear every directory
 * entry, then map the kernel into it via pt_mapkernel. Panics if the kernel
 * mapping fails. NOTE(review): the condition preceding the vm_allocpage
 * call and the returns are elided in this extract — verify in full source. */
585 /*===========================================================================*
587 *===========================================================================*/
588 PUBLIC
int pt_new(pt_t
*pt
)
590 /* Allocate a pagetable root. On i386, allocate a page-aligned page directory
591 * and set them to 0 (indicating no page tables are allocated). Lookup
592 * its physical address as we'll need that in the future. Verify it's
597 /* Don't ever re-allocate/re-move a certain process slot's
598 * page directory once it's been created. This is a fraction
599 * faster, but also avoids having to invalidate the page
600 * mappings from in-kernel page tables pointing to
601 * the page directories (the page_directories data).
604 !(pt
->pt_dir
= vm_allocpage(&pt
->pt_dir_phys
, VMP_PAGEDIR
))) {
608 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++) {
609 pt
->pt_dir
[i
] = 0; /* invalid entry (I386_VM_PRESENT bit = 0) */
613 /* Where to start looking for free virtual address space? */
617 if(pt_mapkernel(pt
) != OK
)
618 vm_panic("pt_new: pt_mapkernel failed", NO_NUM
);
/* pt_identity: build a 1:1 (identity) pagetable using one 4MB big-page PDE
 * per directory slot, each marked PRESENT|WRITE. NOTE(review): the BIGPAGE
 * flag and the guard before vm_allocpage are elided in this extract (line
 * 640's flag expression is cut mid-OR) — verify in full source. */
623 /*===========================================================================*
625 *===========================================================================*/
626 PUBLIC
int pt_identity(pt_t
*pt
)
628 /* Allocate a pagetable that does a 1:1 mapping. */
631 /* Allocate page directory. */
633 !(pt
->pt_dir
= vm_allocpage(&pt
->pt_dir_phys
, VMP_PAGEDIR
))) {
637 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++) {
639 addr
= I386_BIG_PAGE_SIZE
*i
;
640 pt
->pt_dir
[i
] = (addr
& I386_VM_ADDR_MASK_4MB
) |
643 I386_VM_PRESENT
|I386_VM_WRITE
;
647 /* Where to start looking for free virtual address space? */
/* pt_init: one-time paging bring-up for the VM process itself. Steps, as
 * visible here: allocate and umap the spare-page pool; probe CPU features
 * (PGE -> global_bit, PSE -> bigpage_ok); compute the identity-map high PDE
 * from 'usedlimit'; build a private page table (pt_new) with VM's segments
 * relocated up to VM_PROCSTART; allocate the page-directory pagetable;
 * grow the data segment to VM_DATATOP; collect kernel mappings via
 * sys_vmctl_get_mapping into kern_mappings[]; reserve a PDE for the page
 * directories; report free PDEs, the kernel limit, and the pagedir location
 * to the kernel; finally enable paging and restore the real stack length.
 * NOTE(review): many declarations (r, s, lo, hi, moveup, newpt, free_pde,
 * kernmap_pde, offset, vir, kernlimit) and closing braces are elided in
 * this extract — verify against full source. */
653 /*===========================================================================*
655 *===========================================================================*/
656 PUBLIC
void pt_init(phys_bytes usedlimit
)
658 /* By default, the kernel gives us a data segment with pre-allocated
659 * memory that then can't grow. We want to be able to allocate memory
660 * dynamically, however. So here we copy the part of the page table
661 * that's ours, so we get a private page table. Then we increase the
662 * hardware segment size so we can allocate memory above our stack.
666 vir_bytes v
, kpagedir
;
668 vir_bytes extra_clicks
;
670 int global_bit_ok
= 0;
674 vir_bytes sparepages_mem
;
675 phys_bytes sparepages_ph
;
681 /* Get ourselves spare pages. */
682 if(!(sparepages_mem
= (vir_bytes
) aalloc(I386_PAGE_SIZE
*SPAREPAGES
)))
683 vm_panic("pt_init: aalloc for spare failed", NO_NUM
);
684 if((r
=sys_umap(SELF
, VM_D
, (vir_bytes
) sparepages_mem
,
685 I386_PAGE_SIZE
*SPAREPAGES
, &sparepages_ph
)) != OK
)
686 vm_panic("pt_init: sys_umap failed", r
);
688 for(s
= 0; s
< SPAREPAGES
; s
++) {
689 sparepages
[s
].page
= (void *) (sparepages_mem
+ s
*I386_PAGE_SIZE
);
690 sparepages
[s
].phys
= sparepages_ph
+ s
*I386_PAGE_SIZE
;
695 /* global bit and 4MB pages available? */
696 global_bit_ok
= _cpufeature(_CPUF_I386_PGE
);
697 bigpage_ok
= _cpufeature(_CPUF_I386_PSE
);
699 /* Set bit for PTE's and PDE's if available. */
701 global_bit
= I386_VM_GLOBAL
;
703 /* The kernel and boot time processes need an identity mapping.
704 * We use full PDE's for this without separate page tables.
705 * Figure out which pde we can start using for other purposes.
707 id_map_high_pde
= usedlimit
/ I386_BIG_PAGE_SIZE
;
709 /* We have to make mappings up till here. */
710 free_pde
= id_map_high_pde
+1;
712 /* Initial (current) range of our virtual address space. */
713 lo
= CLICK2ABS(vmp
->vm_arch
.vm_seg
[T
].mem_phys
);
714 hi
= CLICK2ABS(vmp
->vm_arch
.vm_seg
[S
].mem_phys
+
715 vmp
->vm_arch
.vm_seg
[S
].mem_len
);
717 vm_assert(!(lo
% I386_PAGE_SIZE
));
718 vm_assert(!(hi
% I386_PAGE_SIZE
));
720 if(lo
< VM_PROCSTART
) {
721 moveup
= VM_PROCSTART
- lo
;
722 vm_assert(!(VM_PROCSTART
% I386_PAGE_SIZE
));
723 vm_assert(!(lo
% I386_PAGE_SIZE
));
724 vm_assert(!(moveup
% I386_PAGE_SIZE
));
727 /* Make new page table for ourselves, partly copied
728 * from the current one.
730 if(pt_new(newpt
) != OK
)
731 vm_panic("pt_init: pt_new failed", NO_NUM
);
733 /* Set up mappings for VM process. */
734 for(v
= lo
; v
< hi
; v
+= I386_PAGE_SIZE
) {
738 /* We have to write the new position in the PT,
739 * so we can move our segments.
741 if(pt_writemap(newpt
, v
+moveup
, v
, I386_PAGE_SIZE
,
742 I386_VM_PRESENT
|I386_VM_WRITE
|I386_VM_USER
, 0) != OK
)
743 vm_panic("pt_init: pt_writemap failed", NO_NUM
);
746 /* Move segments up too. */
747 vmp
->vm_arch
.vm_seg
[T
].mem_phys
+= ABS2CLICK(moveup
);
748 vmp
->vm_arch
.vm_seg
[D
].mem_phys
+= ABS2CLICK(moveup
);
749 vmp
->vm_arch
.vm_seg
[S
].mem_phys
+= ABS2CLICK(moveup
);
751 /* Allocate us a page table in which to remember page directory
754 if(!(page_directories
= vm_allocpage(&page_directories_phys
,
756 vm_panic("no virt addr for vm mappings", NO_NUM
);
758 memset(page_directories
, 0, I386_PAGE_SIZE
);
760 /* Increase our hardware data segment to create virtual address
761 * space above our stack. We want to increase it to VM_DATATOP,
762 * like regular processes have.
764 extra_clicks
= ABS2CLICK(VM_DATATOP
- hi
);
765 vmp
->vm_arch
.vm_seg
[S
].mem_len
+= extra_clicks
;
767 /* We pretend to the kernel we have a huge stack segment to
768 * increase our data segment.
770 vmp
->vm_arch
.vm_data_top
=
771 (vmp
->vm_arch
.vm_seg
[S
].mem_vir
+
772 vmp
->vm_arch
.vm_seg
[S
].mem_len
) << CLICK_SHIFT
;
774 /* Where our free virtual address space starts.
775 * This is only a hint to the VM system.
777 newpt
->pt_virtop
= 0;
779 /* Let other functions know VM now has a private page table. */
780 vmp
->vm_flags
|= VMF_HASPT
;
782 /* Now reserve another pde for kernel's own mappings. */
785 phys_bytes addr
, len
;
786 int flags
, index
= 0;
789 kernmap_pde
= free_pde
++;
790 offset
= kernmap_pde
* I386_BIG_PAGE_SIZE
;
792 while(sys_vmctl_get_mapping(index
, &addr
, &len
,
795 if(index
>= MAX_KERNMAPPINGS
)
796 vm_panic("VM: too many kernel mappings", index
);
797 kern_mappings
[index
].phys_addr
= addr
;
798 kern_mappings
[index
].len
= len
;
799 kern_mappings
[index
].flags
= flags
;
800 kern_mappings
[index
].lin_addr
= offset
;
801 kern_mappings
[index
].flags
=
802 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
|
804 if(flags
& VMMF_UNCACHED
)
805 kern_mappings
[index
].flags
|=
806 I386_VM_PWT
| I386_VM_PCD
;
807 if(addr
% I386_PAGE_SIZE
)
808 vm_panic("VM: addr unaligned", addr
);
809 if(len
% I386_PAGE_SIZE
)
810 vm_panic("VM: len unaligned", len
);
811 vir
= arch_map2vir(&vmproc
[VMP_SYSTEM
], offset
);
812 if(sys_vmctl_reply_mapping(index
, vir
) != OK
)
813 vm_panic("VM: reply failed", NO_NUM
);
820 /* Find a PDE below processes available for mapping in the
821 * page directories (readonly).
823 pagedir_pde
= free_pde
++;
824 pagedir_pde_val
= (page_directories_phys
& I386_VM_ADDR_MASK
) |
825 I386_VM_PRESENT
| I386_VM_USER
| I386_VM_WRITE
;
827 /* Tell kernel about free pde's. */
828 while(free_pde
*I386_BIG_PAGE_SIZE
< VM_PROCSTART
) {
829 if((r
=sys_vmctl(SELF
, VMCTL_I386_FREEPDE
, free_pde
++)) != OK
) {
830 vm_panic("VMCTL_I386_FREEPDE failed", r
);
834 /* first pde in use by process. */
837 kernlimit
= free_pde
*I386_BIG_PAGE_SIZE
;
839 /* Increase kernel segment to address this memory. */
840 if((r
=sys_vmctl(SELF
, VMCTL_I386_KERNELLIMIT
, kernlimit
)) != OK
) {
841 vm_panic("VMCTL_I386_KERNELLIMIT failed", r
);
844 kpagedir
= arch_map2vir(&vmproc
[VMP_SYSTEM
],
845 pagedir_pde
*I386_BIG_PAGE_SIZE
);
847 /* Tell kernel how to get at the page directories. */
848 if((r
=sys_vmctl(SELF
, VMCTL_I386_PAGEDIRS
, kpagedir
)) != OK
) {
/* NOTE(review): this panic message says KERNELLIMIT but the failing call is
 * VMCTL_I386_PAGEDIRS — looks like a copy-paste error; confirm and fix in
 * full source. */
849 vm_panic("VMCTL_I386_KERNELLIMIT failed", r
);
852 /* Give our process the new, copied, private page table. */
853 pt_mapkernel(newpt
); /* didn't know about vm_dir pages earlier */
856 /* Now actually enable paging. */
857 if(sys_vmctl_enable_paging(vmp
->vm_arch
.vm_seg
) != OK
)
858 vm_panic("pt_init: enable paging failed", NO_NUM
);
860 /* Back to reality - this is where the stack actually is. */
861 vmp
->vm_arch
.vm_seg
[S
].mem_len
-= extra_clicks
;
/* pt_bind: associate page table 'pt' with process 'who': record the page
 * directory's physical address in the global page_directories table (at the
 * process's slot, PRESENT|WRITE) and tell the kernel to load it as the
 * process's CR3 via VMCTL_I386_SETCR3. NOTE(review): the computation of
 * 'slot' and declarations are elided in this extract; the printf at line
 * 893 is presumably debug-only (guard elided) — verify in full source. */
868 /*===========================================================================*
870 *===========================================================================*/
871 PUBLIC
int pt_bind(pt_t
*pt
, struct vmproc
*who
)
876 /* Basic sanity checks. */
878 vm_assert(who
->vm_flags
& VMF_INUSE
);
882 vm_assert(slot
>= 0);
883 vm_assert(slot
< ELEMENTS(vmproc
));
884 vm_assert(slot
< I386_VM_PT_ENTRIES
);
/* The directory's physical address must already be page-aligned. */
886 phys
= pt
->pt_dir_phys
& I386_VM_ADDR_MASK
;
887 vm_assert(pt
->pt_dir_phys
== phys
);
889 /* Update "page directory pagetable." */
890 page_directories
[slot
] = phys
| I386_VM_PRESENT
|I386_VM_WRITE
;
893 printf("VM: slot %d has pde val 0x%lx\n", slot
, page_directories
[slot
]);
895 /* Tell kernel about new page table root. */
896 return sys_vmctl(who
->vm_endpoint
, VMCTL_I386_SETCR3
,
897 pt
? pt
->pt_dir_phys
: 0);
/* pt_free: release all page tables belonging to 'pt' by handing each one's
 * virtual/physical page back via vm_freepages (VMP_PAGETABLE accounting).
 * NOTE(review): the guard that presumably skips empty pt_pt[i] slots and
 * the freeing of the directory itself are elided in this extract — verify
 * in full source. */
900 /*===========================================================================*
902 *===========================================================================*/
903 PUBLIC
void pt_free(pt_t
*pt
)
905 /* Free memory associated with this pagetable. */
908 for(i
= 0; i
< I386_VM_DIR_ENTRIES
; i
++)
910 vm_freepages((vir_bytes
) pt
->pt_pt
[i
],
911 I386_VM_PFA(pt
->pt_dir
[i
]), 1, VMP_PAGETABLE
);
/* pt_mapkernel: install the mappings every i386 page table must carry:
 * identity-map PDEs 0..id_map_high_pde as 4MB big pages (PRESENT|BIGPAGE|
 * USER|WRITE plus the global bit), panicking when big pages are unavailable
 * (the bigpage_ok guard is elided in this extract); expose the page
 * directories PDE when reserved; and write each collected kernel mapping
 * via pt_writemap. NOTE(review): the pt_writemap call head at original line
 * 946, the 'kernmappings' count's origin, and the returns are elided —
 * verify in full source. */
916 /*===========================================================================*
918 *===========================================================================*/
919 PUBLIC
int pt_mapkernel(pt_t
*pt
)
923 /* Any i386 page table needs to map in the kernel address space. */
924 vm_assert(vmproc
[VMP_SYSTEM
].vm_flags
& VMF_INUSE
);
928 for(pde
= 0; pde
<= id_map_high_pde
; pde
++) {
930 addr
= pde
* I386_BIG_PAGE_SIZE
;
931 vm_assert((addr
& I386_VM_ADDR_MASK
) == addr
);
932 pt
->pt_dir
[pde
] = addr
| I386_VM_PRESENT
|
933 I386_VM_BIGPAGE
| I386_VM_USER
|
934 I386_VM_WRITE
| global_bit
;
937 vm_panic("VM: pt_mapkernel: no bigpage", NO_NUM
);
940 if(pagedir_pde
>= 0) {
941 /* Kernel also wants to know about all page directories. */
942 pt
->pt_dir
[pagedir_pde
] = pagedir_pde_val
;
945 for(i
= 0; i
< kernmappings
; i
++) {
947 kern_mappings
[i
].lin_addr
,
948 kern_mappings
[i
].phys_addr
,
949 kern_mappings
[i
].len
,
950 kern_mappings
[i
].flags
, 0) != OK
) {
951 vm_panic("pt_mapkernel: pt_writemap failed", NO_NUM
);
958 /*===========================================================================*
960 *===========================================================================*/
961 PUBLIC
void pt_cycle(void)