/* See COPYRIGHT for copyright information. */

#include <inc/x86.h>
#include <inc/mmu.h>
#include <inc/error.h>
#include <inc/string.h>
#include <inc/assert.h>

#include <kern/pmap.h>
#include <kern/kclock.h>
#include <kern/env.h>
// These variables are set by i386_detect_memory()
size_t npages;			// Amount of physical memory (in pages)
static size_t npages_basemem;	// Amount of base memory (in pages)

// These variables are set in mem_init()
pde_t *kern_pgdir;		// Kernel's initial page directory
struct Page *pages;		// Physical page state array
static struct Page *page_free_list;	// Free list of physical pages

extern char bootstack[];	// Lowest addr in boot-time kernel stack
				// (a virtual address in remapped physical mem)
// Global descriptor table.
//
// The kernel and user segments are identical (except for the DPL).
// To load the SS register, the CPL must equal the DPL.  Thus,
// we must duplicate the segments for the user and the kernel.
//
struct Segdesc gdt[] = {
	SEG_NULL,				// 0x0 - unused (always faults)
	SEG(STA_X | STA_R, 0x0, 0xffffffff, 0),	// 0x8 - kernel code segment
	SEG(STA_W, 0x0, 0xffffffff, 0),		// 0x10 - kernel data segment
	SEG(STA_X | STA_R, 0x0, 0xffffffff, 3),	// 0x18 - user code segment
	SEG(STA_W, 0x0, 0xffffffff, 3),		// 0x20 - user data segment
	SEG_NULL				// 0x28 - tss, initialized in
};

struct Pseudodesc gdt_pd = {
	sizeof(gdt) - 1, (unsigned long) gdt
};
static physaddr_t check_va2pa(pde_t *pgdir, uintptr_t va);
// --------------------------------------------------------------
// Detect machine's physical memory setup.
// --------------------------------------------------------------

static int
nvram_read(int r)
{
	return mc146818_read(r) | (mc146818_read(r + 1) << 8);
}
static void
i386_detect_memory(void)
{
	size_t npages_extendedmem;

	// Use CMOS calls to measure available base & extended memory.
	// (CMOS calls return results in kilobytes.)
	npages_basemem = (nvram_read(NVRAM_BASELO) * 1024) / PGSIZE;
	npages_extendedmem = (nvram_read(NVRAM_EXTLO) * 1024) / PGSIZE;

	// Calculate the number of physical pages available in both base
	// and extended memory.
	if (npages_extendedmem)
		npages = (EXTPHYSMEM / PGSIZE) + npages_extendedmem;
	else
		npages = npages_basemem;

	cprintf("Physical memory: %uK available, base = %uK, extended = %uK\n",
		npages * PGSIZE / 1024,
		npages_basemem * PGSIZE / 1024,
		npages_extendedmem * PGSIZE / 1024);
}
// --------------------------------------------------------------
// Set up initial memory mappings and turn on MMU.
// --------------------------------------------------------------

static void check_kern_pgdir(void);
static void check_page_alloc(void);
static void check_page(void);
static void page_map_segment(pde_t *pgdir, uintptr_t la, size_t size,
			     physaddr_t pa, int perm);
// This simple physical memory allocator is used only while JOS is setting
// up its virtual memory system.  page_alloc() is the real allocator.
//
// If n>0, allocates enough pages of contiguous physical memory to hold 'n'
// bytes.  Doesn't initialize the memory.  Returns a kernel virtual address.
//
// If n==0, returns the address of the next free page without allocating
// anything.
//
// If we're out of memory, boot_alloc should panic.
// This function may ONLY be used during initialization,
// before the free_pages list has been set up.
static void *
boot_alloc(uint32_t n)
{
	static char *nextfree;	// virtual address of next byte of free memory

	// Initialize nextfree if this is the first time.
	// 'end' is a magic symbol automatically generated by the linker,
	// which points to the end of the kernel's bss segment:
	// the first virtual address that the linker did *not* assign
	// to any kernel code or global variables.
	if (!nextfree) {
		extern char end[];
		nextfree = ROUNDUP((char *) end, PGSIZE);
	}

	// Allocate a chunk large enough to hold 'n' bytes, then update
	// nextfree.  Make sure nextfree is kept aligned
	// to a multiple of PGSIZE.
	//
	// LAB 2: Your code here.
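	// A minimal sketch of the missing step (our assumption, not the
	// official lab solution): hand out the bytes at nextfree and
	// advance nextfree to the next page boundary, panicking if we
	// run past the top of physical memory.
	{
		char *result = nextfree;
		nextfree = ROUNDUP(nextfree + n, PGSIZE);
		if ((uint32_t) nextfree - KERNBASE > npages * PGSIZE)
			panic("boot_alloc: out of memory");
		return result;
	}
}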
// Set up a two-level page table:
//    kern_pgdir is the linear (virtual) address of the root
//    boot_cr3 is the physical address of the root
// Then turn on paging.  Then effectively turn off segmentation.
// (i.e., the segment base addrs are set to zero).
//
// This function only sets up the kernel part of the address space
// (i.e., addresses >= UTOP).  The user part of the address space
// will be set up later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read (or write).
void
mem_init(void)
{
	uint32_t cr0;

	// Find out how much memory the machine has (npages & npages_basemem).
	i386_detect_memory();

	// Remove this line when you're ready to test this function.
	panic("mem_init: This function is not finished\n");

	//////////////////////////////////////////////////////////////////////
	// create initial page directory.
	kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
	memset(kern_pgdir, 0, PGSIZE);
	//////////////////////////////////////////////////////////////////////
	// Recursively insert PD in itself as a page table, to form
	// a virtual page table at virtual address UVPT.
	// (For now, you don't have to understand the greater purpose of the
	// following line.)
	// Permissions: kernel R, user R
	kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;
	//////////////////////////////////////////////////////////////////////
	// Allocate an array of npages 'struct Page's and store it in 'pages'.
	// The kernel uses this array to keep track of physical pages: for
	// each physical page, there is a corresponding struct Page in this
	// array.  'npages' is the number of physical pages in memory.
	// Your code goes here:
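	// A hedged sketch (our assumption, not the shipped solution):
	// grab room for one struct Page per physical page and zero it,
	// so every pp_ref/pp_link starts out 0/NULL.
	pages = (struct Page *) boot_alloc(npages * sizeof(struct Page));
	memset(pages, 0, npages * sizeof(struct Page));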
	//////////////////////////////////////////////////////////////////////
	// Make 'envs' point to an array of size 'NENV' of 'struct Env'.
	// LAB 3: Your code here.
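	// A hedged sketch (assumption: 'envs' and 'NENV' are declared in
	// the env code as elsewhere in JOS); allocate and zero the array
	// the same way as 'pages' above.
	envs = (struct Env *) boot_alloc(NENV * sizeof(struct Env));
	memset(envs, 0, NENV * sizeof(struct Env));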
	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages.  Once we've done so, all further
	// memory management will go through the page_* functions.  In
	// particular, we can now map memory using page_map_segment.
	page_init();

	check_page_alloc();
	check_page();
	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory

	//////////////////////////////////////////////////////////////////////
	// Use the physical memory that 'bootstack' refers to as the kernel
	// stack.  The kernel stack grows down from virtual address KSTACKTOP.
	// We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
	// to be the kernel stack, but break this into two pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
	//       the kernel overflows its stack, it will fault rather than
	//       overwrite memory.  Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	// Your code goes here:
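	// A hedged sketch (our assumption, not the official solution):
	// back only the top KSTKSIZE bytes with bootstack's physical
	// pages; the rest of the PTSIZE range stays unmapped as the
	// guard region.
	page_map_segment(kern_pgdir, KSTACKTOP - KSTKSIZE, KSTKSIZE,
			 PADDR(bootstack), PTE_W);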
	//////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// I.e. the VA range [KERNBASE, 2^32) should map to
	// the PA range [0, 2^32 - KERNBASE).
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	// Your code goes here:
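	// A hedged sketch (assumption): the size 2^32 - KERNBASE is
	// exactly -KERNBASE in 32-bit unsigned arithmetic, which lets us
	// express the full range without overflowing.
	page_map_segment(kern_pgdir, KERNBASE, -KERNBASE, 0, PTE_W);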
	//////////////////////////////////////////////////////////////////////
	// Map the 'envs' array read-only by the user at linear address UENVS.
	// Permissions: kernel R, user R
	// (That's the UENVS version; 'envs' itself is kernel RW, user NONE.)
	// LAB 3: Your code here.
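	// A hedged sketch (assumption): a user-readable alias of 'envs'.
	page_map_segment(kern_pgdir, UENVS,
			 ROUNDUP(NENV * sizeof(struct Env), PGSIZE),
			 PADDR(envs), PTE_U);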
	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES.
	// Permissions: kernel R, user R
	// (That's the UPAGES version; 'pages' itself is kernel RW, user NONE.)
	// LAB 3: Your code here.
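	// A hedged sketch (assumption): same pattern as UENVS above.
	page_map_segment(kern_pgdir, UPAGES,
			 ROUNDUP(npages * sizeof(struct Page), PGSIZE),
			 PADDR(pages), PTE_U);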
	// Check that the initial page directory has been set up correctly.
	check_kern_pgdir();
	//////////////////////////////////////////////////////////////////////
	// On x86, segmentation maps a VA to a LA (linear addr) and
	// paging maps the LA to a PA.  I.e. VA => LA => PA.  If paging is
	// turned off the LA is used as the PA.  Note: there is no way to
	// turn off segmentation.  The closest thing is to set the base
	// address to 0, so the VA => LA mapping is the identity.

	// Current mapping: VA KERNBASE+x => PA x.
	//     (segmentation base=-KERNBASE and paging is off)

	// From here on down we must maintain this VA KERNBASE + x => PA x
	// mapping, even though we are turning on paging and reconfiguring
	// segmentation.

	// Map VA 0:4MB same as VA KERNBASE, i.e. to PA 0:4MB.
	// (Limits our kernel to <4MB)
	kern_pgdir[0] = kern_pgdir[PDX(KERNBASE)];
	// Install page table.
	lcr3(PADDR(kern_pgdir));

	// Turn on paging.
	cr0 = rcr0();
	cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_TS|CR0_EM|CR0_MP;
	cr0 &= ~(CR0_TS|CR0_EM);
	lcr0(cr0);
	// Current mapping: KERNBASE+x => x => x.
	// (x < 4MB so uses paging pgdir[0])

	// Reload all segment registers.
	asm volatile("lgdt gdt_pd");
	asm volatile("movw %%ax,%%gs" :: "a" (GD_UD|3));
	asm volatile("movw %%ax,%%fs" :: "a" (GD_UD|3));
	asm volatile("movw %%ax,%%es" :: "a" (GD_KD));
	asm volatile("movw %%ax,%%ds" :: "a" (GD_KD));
	asm volatile("movw %%ax,%%ss" :: "a" (GD_KD));
	asm volatile("ljmp %0,$1f\n 1:\n" :: "i" (GD_KT));  // reload cs
	asm volatile("lldt %%ax" :: "a" (0));

	// Final mapping: KERNBASE+x => KERNBASE+x => x.

	// This mapping was only used after paging was turned on but
	// before the segment registers were reloaded.
	kern_pgdir[0] = 0;

	// Flush the TLB for good measure, to kill the pgdir[0] mapping.
	lcr3(PADDR(kern_pgdir));
}
// --------------------------------------------------------------
// Tracking of physical pages.
// The 'pages' array has one 'struct Page' entry per physical page.
// Pages are reference counted, and free pages are kept on a linked list.
// --------------------------------------------------------------

//
// Initialize page structure and memory free list.
// After this is done, NEVER use boot_alloc again.  ONLY use the page
// allocator functions below to allocate and deallocate physical
// memory via the page_free_list.
//
void
page_init(void)
{
	// The example code here marks all physical pages as free.
	// However this is not truly the case.  What memory is free?
	//  1) Mark physical page 0 as in use.
	//     This way we preserve the real-mode IDT and BIOS structures
	//     in case we ever need them.  (Currently we don't, but...)
	//  2) The rest of base memory, [PGSIZE, basemem) is free.
	//  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
	//     never be allocated.
	//  4) Then extended memory [EXTPHYSMEM, ...).
	//     Some of it is in use, some is free.  Where is the kernel
	//     in physical memory?  Which pages are already in use for
	//     page tables and other data structures?
	//
	// Change the code to reflect this (see the sketch after the loop).
	size_t i;
	for (i = 0; i < npages; i++) {
		pages[i].pp_ref = 0;
		pages[i].pp_link = page_free_list;
		page_free_list = &pages[i];
	}
//
// Allocates a physical page.
// Does NOT set the contents of the physical page to zero, NOR does it
// increment the reference count of the page - the caller must do
// these if necessary.
// Returns NULL if out of free memory.
//
struct Page *
page_alloc(void)
{
	// Fill this function in
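	// A minimal sketch (our assumption, not the shipped solution):
	// pop the head of the free list; per the comment above, the
	// caller zeroes the page and bumps pp_ref as needed.
	struct Page *pp = page_free_list;
	if (pp) {
		page_free_list = pp->pp_link;
		pp->pp_link = NULL;
	}
	return pp;
}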
//
// Return a page to the free list.
// (This function should only be called when pp->pp_ref reaches 0.)
//
void
page_free(struct Page *pp)
{
	// Fill this function in
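	// A minimal sketch (assumption): push the page back onto the
	// free list.  The comment above promises pp_ref is already zero.
	assert(pp->pp_ref == 0);
	pp->pp_link = page_free_list;
	page_free_list = pp;
}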
// --------------------------------------------------------------
// Looking up pages and adding them to page directories.
// --------------------------------------------------------------

//
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns
// a pointer to the page table entry (PTE) for linear address 'va'.
// This requires walking the two-level page table structure.
//
// The relevant page table page might not exist yet.
// If this is true, and create == 0, then pgdir_walk returns NULL.
// Otherwise, pgdir_walk allocates a new page table page with page_alloc.
//    - If the allocation fails, pgdir_walk returns NULL.
//    - Otherwise, the new page's reference count is incremented,
//	the page is cleared,
//	and pgdir_walk returns a pointer into the new page table page.
//
// Hint: Check out page2pa() and page2kva() in kern/pmap.h.
//
// Hint 2: the x86 MMU checks permission bits in both the page directory
// and the page table, so it's safe to leave permissions in the page
// directory more permissive than strictly necessary.
//
pte_t *
pgdir_walk(pde_t *pgdir, uintptr_t va, int create)
{
	// Fill this function in
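	// A hedged sketch (our assumption, not the official solution),
	// following the hints above:
	pde_t *pde = &pgdir[PDX(va)];
	if (!(*pde & PTE_P)) {
		struct Page *pp;
		if (!create || !(pp = page_alloc()))
			return NULL;
		pp->pp_ref++;
		memset(page2kva(pp), 0, PGSIZE);	// new page tables start cleared
		// Permissive PDE bits; each PTE can still restrict access.
		*pde = page2pa(pp) | PTE_P | PTE_W | PTE_U;
	}
	return (pte_t *) KADDR(PTE_ADDR(*pde)) + PTX(va);
}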
//
// Map [la, la+size) of linear address space to physical [pa, pa+size)
// in the page table rooted at pgdir.  Size is a multiple of PGSIZE.
// Use permission bits perm|PTE_P for the entries.  Does not manipulate
// 'struct Page' structures.
//
// Hint: Try using pgdir_walk.
//
static void
page_map_segment(pde_t *pgdir, uintptr_t la, size_t size, physaddr_t pa, int perm)
{
	// Fill this function in
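	// A hedged sketch (assumption): walk the range one page at a
	// time.  Since this is only used for mem_init's static mappings,
	// we simply panic if a page table can't be allocated.
	size_t off;
	for (off = 0; off < size; off += PGSIZE) {
		pte_t *pte = pgdir_walk(pgdir, la + off, 1);
		if (!pte)
			panic("page_map_segment: out of memory");
		*pte = (pa + off) | perm | PTE_P;
	}
}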
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: Check out pgdir_walk, page_remove, page2pa, and similar functions.
//
int
page_insert(pde_t *pgdir, struct Page *pp, uintptr_t va, pte_t perm)
{
	// Fill this function in
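	// A hedged sketch (our assumption, not the official solution).
	// Bumping pp_ref before the page_remove call handles the corner
	// case where pp is already mapped at va: its ref count never
	// touches zero, so the page is not freed out from under us.
	pte_t *pte = pgdir_walk(pgdir, va, 1);
	if (!pte)
		return -E_NO_MEM;
	pp->pp_ref++;
	if (*pte & PTE_P)
		page_remove(pgdir, va);		// also invalidates the TLB
	*pte = page2pa(pp) | perm | PTE_P;
	return 0;
}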
//
// Return the struct Page for the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct Page *
page_lookup(pde_t *pgdir, uintptr_t va, pte_t **pte_store)
{
	// Fill this function in
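	// A minimal sketch (assumption), per the hint above:
	pte_t *pte = pgdir_walk(pgdir, va, 0);
	if (!pte || !(*pte & PTE_P))
		return NULL;
	if (pte_store)
		*pte_store = pte;
	return pa2page(PTE_ADDR(*pte));
}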
//
// Decrement the reference count on a page,
// freeing it if there are no more refs.
//
void
page_decref(struct Page *pp)
{
	if (--pp->pp_ref == 0)
		page_free(pp);
}
//
// Unmaps the physical page at virtual address 'va'.
// If there is no physical page at that address, silently does nothing.
//
// Details:
//   - The ref count on the physical page should decrement.
//   - The physical page should be freed if the refcount reaches 0.
//   - The pg table entry corresponding to 'va' should be set to 0.
//     (if such a PTE exists)
//   - The TLB must be invalidated if you remove an entry from
//     the page table.
//
// Hint: The TA solution is implemented using page_lookup,
//	tlb_invalidate, and page_decref.
//
void
page_remove(pde_t *pgdir, uintptr_t va)
{
	// Fill this function in
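	// A hedged sketch (assumption), using the hinted helpers:
	pte_t *pte;
	struct Page *pp = page_lookup(pgdir, va, &pte);
	if (!pp)
		return;			// nothing mapped: silently do nothing
	page_decref(pp);		// frees the page if this was the last ref
	*pte = 0;
	tlb_invalidate(pgdir, va);
}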
//
// Invalidate a TLB entry, but only if the page tables being
// edited are the ones currently in use by the processor.
//
void
tlb_invalidate(pde_t *pgdir, uintptr_t va)
{
	// Flush the entry only if we're modifying the current address space.
	if (!curenv || curenv->env_pgdir == pgdir)
		invlpg((void *) va);
}
static uintptr_t user_mem_check_addr;

//
// Check that an environment is allowed to access the range of memory
// [va, va+len) with permissions 'perm | PTE_P'.
// Normally 'perm' will contain PTE_U at least, but this is not required.
// 'va' and 'len' need not be page-aligned; you must test every page that
// contains any of that range.  You will test either 'len/PGSIZE',
// 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages.
//
// A user program can access a virtual address if (1) the address is below
// ULIM, and (2) the page table gives it permission.  These are exactly
// the tests you should implement here.
//
// If there is an error, set the 'user_mem_check_addr' variable to the first
// erroneous virtual address.
//
// Returns 0 if the user program can access this range of addresses,
// and -E_FAULT otherwise.
//
int
user_mem_check(struct Env *env, uintptr_t va, size_t len, pte_t perm)
{
	// LAB 3: Your code here.
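	// A hedged sketch (our assumption, not the official solution):
	// check every page the range touches; record the first bad
	// address, which is 'va' itself when the first page fails.
	uintptr_t a = ROUNDDOWN(va, PGSIZE);
	uintptr_t last = ROUNDUP(va + len, PGSIZE);
	for (; a < last; a += PGSIZE) {
		pte_t *pte = pgdir_walk(env->env_pgdir, a, 0);
		if (a >= ULIM || !pte
		    || (*pte & (perm | PTE_P)) != (perm | PTE_P)) {
			user_mem_check_addr = (a < va ? va : a);
			return -E_FAULT;
		}
	}
	return 0;
}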
//
// Checks that environment 'env' is allowed to access the range
// of memory [va, va+len) with permissions 'perm | PTE_U | PTE_P'.
// If it can, then the function simply returns.
// If it cannot, 'env' is destroyed and, if env is the current
// environment, this function will not return.
//
void
user_mem_assert(struct Env *env, uintptr_t va, size_t len, pte_t perm)
{
	if (user_mem_check(env, va, len, perm | PTE_U) < 0) {
		cprintf("[%08x] user_mem_check assertion failure for "
			"va %08x\n", env->env_id, user_mem_check_addr);
		env_destroy(env);	// may not return
	}
}
// --------------------------------------------------------------
// Checking functions.
// --------------------------------------------------------------

//
// Check the physical page allocator (page_alloc(), page_free(),
// and page_init()).
//
static void
check_page_alloc(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page *fl;
	char *first_free_page;

	if (!pages)
		panic("'pages' is still a null pointer!");

	// if there's a page that shouldn't be on
	// the free list, try to make sure it
	// eventually causes trouble.
	for (pp0 = page_free_list; pp0; pp0 = pp0->pp_link)
		memset(page2kva(pp0), 0x97, 128);

	first_free_page = (char *) boot_alloc(0);
	for (pp0 = page_free_list; pp0; pp0 = pp0->pp_link) {
		// check that we didn't corrupt the free list itself
		assert(pp0 >= pages);
		assert(pp0 < pages + npages);

		// check a few pages that shouldn't be on the free list
		assert(page2pa(pp0) != 0);
		assert(page2pa(pp0) != IOPHYSMEM);
		assert(page2pa(pp0) != EXTPHYSMEM - PGSIZE);
		assert(page2pa(pp0) != EXTPHYSMEM);
		assert(page2kva(pp0) != ROUNDDOWN(first_free_page - 1, PGSIZE));
	}

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc()));
	assert((pp1 = page_alloc()));
	assert((pp2 = page_alloc()));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(page2pa(pp0) < npages*PGSIZE);
	assert(page2pa(pp1) < npages*PGSIZE);
	assert(page2pa(pp2) < npages*PGSIZE);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc());

	// free and re-allocate?
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc()));
	assert((pp1 = page_alloc()));
	assert((pp2 = page_alloc()));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(!page_alloc());

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("check_page_alloc() succeeded!\n");
}
//
// Checks that the kernel part of virtual address space
// has been set up roughly correctly (by mem_init()).
//
// This function doesn't test every corner case,
// in fact it doesn't test the permission bits at all,
// but it is a pretty good sanity check.
//
static void
check_kern_pgdir(void)
{
	uint32_t i, n;
	pde_t *pgdir;

	pgdir = kern_pgdir;

	// check phys mem
	for (i = 0; i < npages * PGSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KERNBASE + i) == i);

	// check kernel stack
	for (i = 0; i < KSTKSIZE; i += PGSIZE)
		assert(check_va2pa(pgdir, KSTACKTOP - KSTKSIZE + i) == PADDR(bootstack) + i);
	assert(check_va2pa(pgdir, KSTACKTOP - PTSIZE) == (physaddr_t) ~0);
	// check for zero/non-zero in PDEs
	for (i = 0; i < NPDENTRIES; i++) {
		switch (i) {
		case PDX(UVPT):
		case PDX(KSTACKTOP-1):
		case PDX(UPAGES):
		case PDX(UENVS):
			assert(pgdir[i] & PTE_P);
			break;
		default:
			if (i >= PDX(KERNBASE + npages * PGSIZE))
				/* either way is OK */;
			else if (i >= PDX(KERNBASE))
				assert(pgdir[i] & PTE_P);
			else
				assert(pgdir[i] == 0);
			break;
		}
	}
	// check pages array
	n = ROUNDUP(npages*sizeof(struct Page), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UPAGES + i) == PADDR(pages) + i);

	// check envs array (new test for lab 3)
	n = ROUNDUP(NENV*sizeof(struct Env), PGSIZE);
	for (i = 0; i < n; i += PGSIZE)
		assert(check_va2pa(pgdir, UENVS + i) == PADDR(envs) + i);

	cprintf("check_kern_pgdir() succeeded!\n");
}
//
// Return the physical address of the page containing 'va',
// defined by the page directory 'pgdir'.
// Returns the special value ~0 if 'va' is not mapped.
// The hardware normally performs this functionality for us!
// We define our own version to help check the check_kern_pgdir() function.
//
static physaddr_t
check_va2pa(pde_t *pgdir, uintptr_t va)
{
	pte_t *p;

	pgdir = &pgdir[PDX(va)];
	if (!(*pgdir & PTE_P))
		return ~0;
	p = (pte_t *) KADDR(PTE_ADDR(*pgdir));
	if (!(p[PTX(va)] & PTE_P))
		return ~0;
	return PTE_ADDR(p[PTX(va)]);
}
//
// check page_insert, page_remove, &c
//
static void
check_page(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page *fl;
	pte_t *ptep, *ptep1;
	uintptr_t va;
	int i;

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc()));
	assert((pp1 = page_alloc()));
	assert((pp2 = page_alloc()));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc());

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, 0) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, 0) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, PGSIZE, 0) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc());

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, PGSIZE, 0) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(!page_alloc());

	// check that pgdir_walk returns a pointer to the pte
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, PGSIZE, PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, PGSIZE, 0) & PTE_U);
	assert(kern_pgdir[0] & PTE_U);

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(kern_pgdir, pp0, PTSIZE, 0) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, PGSIZE, 0) == 0);
	assert(!(*pgdir_walk(kern_pgdir, PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc()) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == (physaddr_t) ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == (physaddr_t) ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == (physaddr_t) ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc()) && pp == pp1);

	// should be no free memory
	assert(!page_alloc());

	// should be able to page_insert to change a page
	// and see the new data immediately.
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(kern_pgdir, pp1, 0x0, 0);
	assert(pp1->pp_ref == 1);
	assert(*(int*)0 == 0x01010101);
	page_insert(kern_pgdir, pp2, 0x0, 0);
	assert(*(int*)0 == 0x02020202);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	page_remove(kern_pgdir, 0x0);
	assert(pp2->pp_ref == 0);

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = PGSIZE * NPDENTRIES + PGSIZE;
	ptep = pgdir_walk(kern_pgdir, va, 1);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, 1);
	ptep = (pte_t *) page2kva(pp0);
	for (i = 0; i < NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	cprintf("check_page() succeeded!\n");
}