/*	$NetBSD: pmap.c,v 1.59 2009/11/07 07:27:45 cegger Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.59 2009/11/07 07:27:45 cegger Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <powerpc/spr.h>
#include <machine/tlb.h>

/*
 * kernmap is an array of PTEs large enough to map in
 * 4GB.  At 16KB/page it is 256K entries or 2MB.
 */
#define KERNMAP_SIZE	((0xffffffffU/PAGE_SIZE)+1)
volatile struct pmap *ctxbusy[NUMCTX];

#define TLBF_USED	0x1
#define TLBF_REF	0x2
#define TLBF_LOCKED	0x4
#define TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)

typedef struct tlb_info_s {
	char	ti_ctx;		/* TLB_PID associated with the entry */
	char	ti_flags;	/* TLBF_* flags for the entry */
	vaddr_t	ti_va;		/* VA mapped by the entry */
} tlb_info_t;

volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy because it's cheap */
volatile int tlbnext;

static int tlb_nreserved = 0;
static int pmap_bootstrap_done = 0;
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbmiss");
struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbhit");
struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbflush");
struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbenter");

struct pmap kernel_pmap_;
struct pmap *const kernel_pmap_ptr = &kernel_pmap_;

static u_int npgs;
static u_int nextavail;

extern paddr_t msgbuf_paddr;

static struct mem_region *mem, *avail;

/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTRSHFT.
 */
static char *pmap_attrib;
#define PV_WIRED	0x1
#define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
#define PV_UNWIRE(pv)	((pv)->pv_va &= ~PV_WIRED)
#define PV_ISWIRED(pv)	((pv)->pv_va & PV_WIRED)
#define PV_CMPVA(va,pv)	(!(((pv)->pv_va ^ (va)) & (~PV_WIRED)))

struct pv_entry {
	struct pv_entry *pv_next;	/* Linked list of mappings */
	vaddr_t pv_va;			/* virtual address of mapping */
	struct pmap *pv_pm;		/* pmap owning this mapping */
};
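
/*
 * The wired bit lives in the low bit of pv_va; mapping addresses are
 * page aligned, so that bit is otherwise unused.  PV_CMPVA() masks
 * PV_WIRED out before comparing, so a wired and an unwired entry for
 * the same VA still compare equal.
 */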
/* Each index corresponds to TLB_SIZE_* value. */
static size_t tlbsize[] = {
	1024, 		/* TLB_SIZE_1K */
	4096, 		/* TLB_SIZE_4K */
	16384, 		/* TLB_SIZE_16K */
	65536, 		/* TLB_SIZE_64K */
	262144, 	/* TLB_SIZE_256K */
	1048576, 	/* TLB_SIZE_1M */
	4194304, 	/* TLB_SIZE_4M */
	16777216, 	/* TLB_SIZE_16M */
};

struct pv_entry *pv_table;
static struct pool pv_pool;

static int pmap_initialized;

static int ctx_flush(int);

inline struct pv_entry *pa_to_pv(paddr_t);
static inline char *pa_to_attr(paddr_t);

static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
static inline int pte_enter(struct pmap *, vaddr_t, u_int);

static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, int);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);

static int ppc4xx_tlb_size_mask(size_t, int *, int *);
inline struct pv_entry *
pa_to_pv(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
}

static inline char *
pa_to_attr(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
}
/*
 * Insert PTE into page table.
 */
static inline int
pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int oldpte;

	if (!pm->pm_ptbl[seg]) {
		/* Don't allocate a page to clear a non-existent mapping. */
		if (!pte)
			return (0);
		/* Allocate a page XXXX this will sleep! */
		pm->pm_ptbl[seg] =
		    (uint *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_WIRED | UVM_KMF_ZERO);
	}
	oldpte = pm->pm_ptbl[seg][ptn];
	pm->pm_ptbl[seg][ptn] = pte;

	/* Flush entry. */
	ppc4xx_tlb_flush(va, pm->pm_ctx);
	if (oldpte != pte) {
		if (pte == 0)
			pm->pm_stats.resident_count--;
		else
			pm->pm_stats.resident_count++;
	}
	return (1);
}
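
/*
 * The software page table is two-level: pm_ptbl[] holds STSZ pointers to
 * page-sized arrays of PTEs, allocated lazily by pte_enter() above.
 * STIDX(va) selects the directory slot and PTIDX(va) the PTE within it,
 * so address ranges with no mappings cost no page-table memory at all.
 */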
/*
 * Get a pointer to a PTE in a page table.
 */
static inline volatile u_int *
pte_find(struct pmap *pm, vaddr_t va)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);

	if (pm->pm_ptbl[seg])
		return (&pm->pm_ptbl[seg][ptn]);

	return (NULL);
}
/*
 * This is called during initppc, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	tlbnext = tlb_nreserved;

	/*
	 * Allocate the kernel page table at the end of
	 * kernel space so it's in the locked TTE.
	 */
	kernmap = (void *)kernelend;

	/*
	 * Initialize kernel page table.
	 */
	for (i = 0; i < STSZ; i++) {
		pmap_kernel()->pm_ptbl[i] = 0;
	}
	ctxbusy[0] = ctxbusy[1] = pmap_kernel();

	/*
	 * Announce page-size to the VM-system
	 */
	uvmexp.pagesize = NBPG;
	uvm_setpagesize();

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++) {
		physmem += btoc(mp->size);
		printf("+%lx,",mp->size);
	}
	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++)
		cnt++;

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PGOFSET;
	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		printf("%08x-%08x -> ",s,e);
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		printf("%08x-%08x = %x\n",s,e,sz);
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}
		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);
		for (mp1 = avail; mp1 < mp; mp1++)
			if (s < mp1->start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}
	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */

	sz = round_page(MSGBUFSIZE);
	mp = NULL;
	for (mp1 = avail; mp1->size; mp1++)
		if (mp1->size >= sz)
			mp = mp1;
	if (mp == NULL)
		panic("not enough memory?");

	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;
	if (mp->size <= 0)
		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);

	for (mp = avail; mp->size; mp++)
		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
		    atop(mp->start), atop(mp->start + mp->size),
		    VM_FREELIST_DEFAULT);
	/*
	 * Initialize kernel pmap and hardware.
	 */
	/* Setup TLB pid allocator so it knows we're already using PID 1 */
	pmap_kernel()->pm_ctx = KERNEL_PID;
	nextavail = avail->start;

	evcnt_attach_static(&tlbmiss_ev);
	evcnt_attach_static(&tlbhit_ev);
	evcnt_attach_static(&tlbflush_ev);
	evcnt_attach_static(&tlbenter_ev);

	pmap_bootstrap_done = 1;
}
/*
 * Restrict given range to physical memory
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start &&
		    *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}
/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	struct pv_entry *pv;
	vsize_t sz;
	vaddr_t addr;
	int i, bank;
	char *attr;

	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
	sz = round_page(sz);
	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);

	pv = pv_table = (struct pv_entry *)addr;
	for (i = npgs; --i >= 0;)
		pv++;
	pmap_attrib = (char *)pv;

	pv = pv_table;
	attr = pmap_attrib;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		sz = vm_physmem[bank].end - vm_physmem[bank].start;
		vm_physmem[bank].pmseg.pvent = pv;
		vm_physmem[bank].pmseg.attrs = attr;
		pv += sz;
		attr += sz;
	}

	pmap_initialized = 1;

	/* Setup a pool for additional pvlist structures */
	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL,
	    IPL_VM);
}
/*
 * How much virtual space is available to the kernel?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{

#if 0
	/*
	 * Reserve one segment for kernel virtual memory
	 */
	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
	*end = *start + SEGMENT_LENGTH;
#else
	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
#endif
}
#ifdef PMAP_GROWKERNEL
/*
 * Preallocate kernel page tables to a specified VA.
 * This simply loops through the first TTE for each
 * page table from the beginning of the kernel pmap,
 * reads the entry, and if the result is
 * zero (either invalid entry or no page table) it stores
 * a zero there, populating page tables in the process.
 * This is not the most efficient technique but I don't
 * expect it to be called that often.
 */
extern struct vm_page *vm_page_alloc1(void);
extern void vm_page_free1(struct vm_page *);

vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	int s;
	int seg;
	paddr_t pg;
	struct pmap *pm = pmap_kernel();

	s = splvm();

	/* Align with the start of a page table */
	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
	     kbreak += PTMAP) {
		seg = STIDX(kbreak);

		if (pte_find(pm, kbreak))
			continue;

		if (uvm.page_init_done) {
			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
		} else {
			if (!uvm_page_physget(&pg))
				panic("pmap_growkernel: no memory");
		}
		if (!pg)
			panic("pmap_growkernel: no pages");
		pmap_zero_page((paddr_t)pg);

		/* XXX This is based on all physical memory being addressable */
		pm->pm_ptbl[seg] = (u_int *)pg;
	}
	splx(s);
	return kbreak;
}
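
/*
 * kbreak records how far kernel page tables have been preallocated, so a
 * later call with a smaller maxkvaddr is a no-op and a larger one only
 * touches the newly covered page-table slots.  The loop above advances by
 * one page table (PTMAP bytes of KVA) per iteration.
 */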
/*
 * Allocate and return a memory cell with no associated object.
 */
struct vm_page *
vm_page_alloc1(void)
{
	struct vm_page *pg;

	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (pg) {
		pg->wire_count = 1;	/* no mappings yet */
		pg->flags &= ~PG_BUSY;	/* never busy */
	}
	return pg;
}

/*
 * vm_page_free1:
 *
 * Returns the given page to the free list,
 * disassociating it with any VM object.
 *
 * Object and page must be locked prior to entry.
 */
void
vm_page_free1(struct vm_page *pg)
{

	if (pg->flags != (PG_CLEAN|PG_FAKE)) {
		printf("Freeing invalid page %p\n", pg);
		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg));
		return;
	}
	pg->flags |= PG_BUSY;
	pg->wire_count = 0;
	uvm_pagefree(pg);
}
#endif /* PMAP_GROWKERNEL */
/*
 * Create and return a physical map.
 */
struct pmap *
pmap_create(void)
{
	struct pmap *pm;

	pm = malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
	memset(pm, 0, sizeof *pm);
	pm->pm_refs = 1;
	return pm;
}
/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{
	int i;

	if (--pm->pm_refs > 0) {
		return;
	}
	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);
	for (i = 0; i < STSZ; i++)
		if (pm->pm_ptbl[i]) {
			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
			    PAGE_SIZE, UVM_KMF_WIRED);
			pm->pm_ptbl[i] = NULL;
		}
	if (pm->pm_ctx)
		ctx_free(pm);
	free(pm, M_VMPMAP);
}
/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
    vsize_t len, vaddr_t src_addr)
{
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
}
/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{

#ifdef PPC_4XX_NOCACHE
	memset((void *)pa, 0, PAGE_SIZE);
#else
	int i;

	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{

	memcpy((void *)dst, (void *)src, PAGE_SIZE);
	dcache_flush_page(dst);
}
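
/*
 * pmap_zero_page() relies on "dcbz" zeroing one whole data cache line per
 * instruction, which is only safe when the page is mapped cacheable; the
 * PPC_4XX_NOCACHE build therefore falls back to a plain memset().  Both
 * routines take physical addresses and depend on physical memory being
 * reachable 1:1 in kernel context (see pmap_tlbmiss() below).
 */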
/*
 * This returns != 0 on success.
 */
static inline int
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, int flags)
{
	struct pv_entry *pv, *npv = NULL;
	int s;

	if (!pmap_initialized)
		return 0;

	s = splvm();
	pv = pa_to_pv(pa);
	if (!pv->pv_pm) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_pm = pm;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pool_get(&pv_pool, PR_NOWAIT);
		if (npv == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_enter_pv: failed");
			splx(s);
			return 0;
		}
		npv->pv_va = va;
		npv->pv_pm = pm;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
		pv = npv;
	}
	if (flags & PMAP_WIRED) {
		PV_WIRE(pv);
		pm->pm_stats.wired_count++;
	}
	splx(s);
	return (1);
}

static void
pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);
	if (pv == NULL)
		return;

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
		if (PV_ISWIRED(pv)) {
			pm->pm_stats.wired_count--;
		}
		if ((npv = pv->pv_next)) {
			*pv = *npv;
			pool_put(&pv_pool, npv);
		} else
			pv->pv_pm = NULL;
	} else {
		for (; (npv = pv->pv_next) != NULL; pv = npv)
			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
				break;
		if (npv) {
			pv->pv_next = npv->pv_next;
			if (PV_ISWIRED(npv)) {
				pm->pm_stats.wired_count--;
			}
			pool_put(&pv_pool, npv);
		}
	}
}
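
/*
 * pv_table keeps one struct pv_entry per managed page; that entry doubles
 * as the list head, so the common single-mapping case needs no pool
 * allocation at all.  Additional mappings of the same page are chained
 * behind it with entries taken from pv_pool, which is what the two
 * routines above manipulate.
 */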
/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
int
pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	u_int tte;
	bool managed;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	if (flags & PMAP_WIRED)
		flags |= prot;

	managed = uvm_pageismanaged(pa);

	tte = TTE_PA(pa);
	/* XXXX -- need to support multiple page sizes. */

	if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
	    (PME_NOCACHE | PME_WRITETHROUG))
		panic("pmap_enter: uncached & writethrough");

	if (flags & PME_NOCACHE)
		/* Must be I/O mapping */
		tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
	tte |= TTE_I;
#else
	else if (flags & PME_WRITETHROUG)
		/* Uncached and writethrough are not compatible */
		tte |= TTE_W;
#endif
	if (pm == pmap_kernel())
		tte |= TTE_ZONE(ZONE_PRIV);
	else
		tte |= TTE_ZONE(ZONE_USER);

	if (flags & VM_PROT_WRITE)
		tte |= TTE_WR;

	if (flags & VM_PROT_EXECUTE)
		tte |= TTE_EX;

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && managed) {
		char *attr;

		if (!pmap_enter_pv(pm, va, pa, flags)) {
			/* Could not enter pv on a managed page */
			return ENOMEM;
		}

		/* Now set attributes. */
		attr = pa_to_attr(pa);
		if (!attr)
			panic("managed but no attr");
		if (flags & VM_PROT_ALL)
			*attr |= PMAP_ATTR_REF;
		if (flags & VM_PROT_WRITE)
			*attr |= PMAP_ATTR_CHG;
	}

	/* Insert page into page table. */
	pte_enter(pm, va, tte);

	/* If this is a real fault, enter it in the tlb */
	if (tte && ((flags & PMAP_WIRED) == 0)) {
		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
	}

	/* Flush the real memory from the instruction cache. */
	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
		__syncicache((void *)pa, PAGE_SIZE);

	return 0;
}
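
/*
 * The TTE built above packs everything the TLB fill path needs into one
 * word: the physical page (TTE_PA), cache attributes (e.g. TTE_I | TTE_G
 * for uncached I/O mappings), the protection zone (ZONE_PRIV for the
 * kernel pmap, ZONE_USER otherwise) and the access bits (TTE_WR, TTE_EX).
 * When the mapping is not wired (i.e. this call services a real fault),
 * the entry is also pushed into the hardware TLB right away; wired
 * pre-mappings are left for pmap_tlbmiss() to load on first access.
 */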
void
pmap_unwire(struct pmap *pm, vaddr_t va)
{
	struct pv_entry *pv;
	paddr_t pa;

	if (!pmap_extract(pm, va, &pa)) {
		return;
	}

	pv = pa_to_pv(pa);
	if (!pv)
		return;

	while (pv != NULL) {
		if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
			if (PV_ISWIRED(pv)) {
				PV_UNWIRE(pv);
				pm->pm_stats.wired_count--;
			}
			break;
		}
		pv = pv->pv_next;
	}
}
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	u_int tte;
	struct pmap *pm = pmap_kernel();

	/*
	 * Have to remove any existing mapping first.
	 */

	/*
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = 0;
	if (prot & VM_PROT_ALL) {

		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
		/* XXXX -- need to support multiple page sizes. */

		if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
		    (PME_NOCACHE | PME_WRITETHROUG))
			panic("pmap_kenter_pa: uncached & writethrough");

		if (prot & PME_NOCACHE)
			/* Must be I/O mapping */
			tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
		tte |= TTE_I;
#else
		else if (prot & PME_WRITETHROUG)
			/* Uncached and writethrough are not compatible */
			tte |= TTE_W;
#endif
		if (prot & VM_PROT_WRITE)
			tte |= TTE_WR;
	}

	/* Insert page into page table. */
	pte_enter(pm, va, tte);
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{

	while (len > 0) {
		pte_enter(pmap_kernel(), va, 0);
		va += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}
/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
{
	paddr_t pa;
	volatile u_int *ptp;

	while (va < endva) {
		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
			pa = TTE_PA(pa);
			pmap_remove_pv(pm, va, pa);
			*ptp = 0;
			ppc4xx_tlb_flush(va, pm->pm_ctx);
			pm->pm_stats.resident_count--;
		}
		va += PAGE_SIZE;
	}
}
/*
 * Get the physical page address for the given pmap/virtual address.
 */
bool
pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int pa = 0;

	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
		*pap = TTE_PA(pa) | (va & PGOFSET);
	}
	return (pa != 0);
}
/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	volatile u_int *ptp;
	int bic;

	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pm, sva, eva);
		return;
	}
	bic = 0;
	if ((prot & VM_PROT_WRITE) == 0) {
		bic |= TTE_WR;
	}
	if ((prot & VM_PROT_EXECUTE) == 0) {
		bic |= TTE_EX;
	}
	if (bic == 0)
		return;

	while (sva < eva) {
		if ((ptp = pte_find(pm, sva)) != NULL) {
			*ptp &= ~bic;
			ppc4xx_tlb_flush(sva, pm->pm_ctx);
		}
		sva += PAGE_SIZE;
	}
}
bool
pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
{
	paddr_t pa;
	char *attr;
	int rv;

	/*
	 * First modify bits in cache.
	 */
	pa = VM_PAGE_TO_PHYS(pg);
	attr = pa_to_attr(pa);
	if (attr == NULL)
		return false;

	rv = ((*attr & mask) != 0);
	if (clear) {
		*attr &= ~mask;
		pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
	}
	return rv;
}
/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	vaddr_t va;
	struct pv_entry *pvh, *pv, *npv;
	struct pmap *pm;

	pvh = pa_to_pv(pa);
	if (pvh == NULL)
		return;

	/* Handle extra pvs which may be deleted in the operation */
	for (pv = pvh->pv_next; pv; pv = npv) {
		npv = pv->pv_next;
		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va + PAGE_SIZE, prot);
	}
	/* Now check the head pv */
	if (pvh->pv_pm) {
		pm = pvh->pv_pm;
		va = pvh->pv_va;
		pmap_protect(pm, va, va + PAGE_SIZE, prot);
	}
}
/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct lwp *l)
{
#if 0
	struct pcb *pcb = &l->l_proc->p_addr->u_pcb;
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
	pcb->pcb_pm = pmap;
#endif
}

/*
 * Deactivate the specified process's address space.
 */
void
pmap_deactivate(struct lwp *l)
{
}
/*
 * Synchronize caches corresponding to [addr, addr+len) in p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap *pm = p->p_vmspace->vm_map.pmap;
	int msr, ctx, opid, step;

	step = CACHELINESIZE;

	/*
	 * Need to turn off IMMU and switch to user context.
	 */
	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx = ctx_alloc(pm);
	}
	__asm volatile("mfmsr %0;"
	    : "=&r" (msr), "=&r" (opid)
	    : "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step),
	      "K" (PSL_IR | PSL_DR));
}
/* This has to be done in real mode !!! */
void
ppc4xx_tlb_flush(vaddr_t va, int pid)
{
	u_long i, found;
	u_long msr;

	/* If there's no context then it can't be mapped. */
	if (!pid)
		return;

	__asm(	"mfpid %1;"		/* Save PID */
		"mfmsr %2;"		/* Save MSR */
		"li %0,0;"		/* Now clear MSR */
		"mtmsr %0;"
		"mtpid %4;"		/* Set PID */
		"sync;"
		"tlbsx. %0,0,%3;"	/* Search TLB */
		"sync;"
		"mtpid %1;"		/* Restore PID */
		"mtmsr %2;"		/* Restore MSR */
		"sync;isync;"
		"li %1,1;"
		"beq 1f;"
		"li %1,0;"
		"1:"
		: "=&r" (i), "=&r" (found), "=&r" (msr)
		: "r" (va), "r" (pid));
	if (found && !TLB_LOCKED(i)) {

		/* Now flush translation */
		__asm volatile(
			"tlbwe %0,%1,0;"
			"sync;isync;"
			: : "r" (0), "r" (i));

		tlb_info[i].ti_ctx = 0;
		tlb_info[i].ti_flags = 0;
		/* Successful flushes */
		tlbflush_ev.ev_count++;
	}
}
void
ppc4xx_tlb_flush_all(void)
{
	u_long i;

	for (i = 0; i < NTLB; i++)
		if (!TLB_LOCKED(i)) {
			__asm volatile(
				"tlbwe %0,%1,0;"
				"sync;isync;"
				: : "r" (0), "r" (i));
			tlb_info[i].ti_ctx = 0;
			tlb_info[i].ti_flags = 0;
		}

	__asm volatile("sync;isync");
}
/* Find a TLB entry to evict. */
static int
ppc4xx_tlb_find_victim(void)
{
	int flags;

	for (;;) {
		if (++tlbnext >= NTLB)
			tlbnext = tlb_nreserved;
		flags = tlb_info[tlbnext].ti_flags;
		if (!(flags & TLBF_USED) ||
			(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
			u_long va, stack = (u_long)&va;

			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
			    (flags & TLBF_USED)) {
				/* Kernel stack page */
				flags |= TLBF_REF;
				tlb_info[tlbnext].ti_flags = flags;
			} else {
				/* Found it! */
				return (tlbnext);
			}
		} else {
			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
		}
	}
}
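
/*
 * This is effectively FIFO with a second chance: tlbnext sweeps the
 * unreserved entries round-robin, an entry whose TLBF_REF bit is set gets
 * that bit cleared and is skipped once, and the entry mapping the current
 * kernel stack (recognised by comparing ti_va with the address of a local
 * variable) is re-referenced rather than evicted.
 */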
void
ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
{
	u_long th, tl, idx;
	paddr_t pa;
	int msr, pid, sz;

	tlbenter_ev.ev_count++;

	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
	pa = (pte & TTE_RPN_MASK(sz));
	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
	tl = (pte & ~TLB_RPN_MASK) | pa;
	tl |= ppc4xx_tlbflags(va, pa);

	idx = ppc4xx_tlb_find_victim();

	if ((idx < tlb_nreserved) || (idx >= NTLB)) {
		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
	}

	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
	tlb_info[idx].ti_ctx = ctx;
	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;

	__asm volatile(
		"mfmsr %0;"			/* Save MSR */
		"li %1,0;"
		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
		"mtmsr %1;"			/* Clear MSR */
		"mfpid %1;"			/* Save old PID */
		"mtpid %2;"			/* Load translation ctx */
		"sync; isync;"
		"tweqi %3,0;"			/* XXXXX DEBUG trap on index 0 */
		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
		"sync; isync;"
		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid)
		: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
}
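
/*
 * The 4xx TLB is written as a pair of words: TLBHI ("th" above) carries
 * the effective page number, the size field and the valid bit, while
 * TLBLO ("tl") carries the real page number plus the attribute and
 * protection bits taken from the TTE and ppc4xx_tlbflags().  The write
 * must be performed with translation disabled and the target context
 * loaded into the PID register, which is what the MSR/PID shuffling in
 * the asm is for.
 */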
void
ppc4xx_tlb_init(void)
{
	int i;

	/* Mark reserved TLB entries */
	for (i = 0; i < tlb_nreserved; i++) {
		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
		tlb_info[i].ti_ctx = KERNEL_PID;
	}

	/* Setup security zones */
	/* Z0 - accessible by kernel only if TLB entry permissions allow
	 * Z1,Z2 - access is controlled by TLB entry permissions
	 * Z3 - full access regardless of TLB entry permissions
	 */
	__asm volatile (
		"mtspr %0,%1;"
		"sync;"
		:: "K"(SPR_ZPR), "r" (0x1b000000));
}
/*
 * ppc4xx_tlb_size_mask:
 *
 * 	Roundup size to supported page size, return TLBHI mask and real size.
 */
static int
ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
{
	int i;

	for (i = 0; i < __arraycount(tlbsize); i++)
		if (size <= tlbsize[i]) {
			*mask = (i << TLB_SIZE_SHFT);
			*rsiz = tlbsize[i];
			return (0);
		}
	return (EINVAL);
}
/*
 * ppc4xx_tlb_mapiodev:
 *
 * 	Lookup virtual address of mapping previously entered via
 * 	ppc4xx_tlb_reserve. Search TLB directly so that we don't
 * 	need to waste extra storage for reserved mappings. Note
 * 	that reading TLBHI also sets PID, but all reserved mappings
 * 	use KERNEL_PID, so the side effect is nil.
 */
void *
ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
{
	paddr_t pa;
	vaddr_t va;
	u_int lo, hi, sz;
	int i;

	/* tlb_nreserved is only allowed to grow, so this is safe. */
	for (i = 0; i < tlb_nreserved; i++) {
		__asm volatile (
		    "	tlbre %0,%2,1 	\n"	/* TLBLO */
		    "	tlbre %1,%2,0 	\n"	/* TLBHI */
		    : "=&r" (lo), "=&r" (hi)
		    : "r" (i));

		KASSERT(hi & TLB_VALID);
		KASSERT(mfspr(SPR_PID) == KERNEL_PID);

		pa = (lo & TLB_RPN_MASK);
		if (base < pa)
			continue;

		sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
		if ((base + len) > (pa + sz))
			continue;

		va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); 	/* sz = 2^n */
		return (void *)(va);
	}

	return (NULL);
}
/*
 * ppc4xx_tlb_reserve:
 *
 * 	Map physical range to kernel virtual chunk via reserved TLB entry.
 */
void
ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
{
	u_int lo, hi;
	int szmask, rsize;

	/* Called before pmap_bootstrap(), va outside kernel space. */
	KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
	KASSERT(! pmap_bootstrap_done);
	KASSERT(tlb_nreserved < NTLB);

	if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
		panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
		    tlb_nreserved, size);

	/* Real size will be power of two >= 1024, so this is OK. */
	pa &= ~(rsize - 1); 	/* RPN */
	va &= ~(rsize - 1); 	/* EPN */

	lo = pa | TLB_WR | flags;
	hi = va | TLB_VALID | szmask;

#ifdef PPC_4XX_NOCACHE
	lo |= TLB_I;
#endif

	__asm volatile(
	    "	tlbwe %1,%0,1 	\n"	/* write TLBLO */
	    "	tlbwe %2,%0,0 	\n"	/* write TLBHI */
	    "	sync 		\n"
	    "	isync 		\n"
	    : : "r" (tlb_nreserved), "r" (lo), "r" (hi));

	tlb_nreserved++;
}
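
/*
 * Usage sketch (hypothetical addresses and flag names, not taken from this
 * file): early board code could pin a 1MB device window with
 *
 *	ppc4xx_tlb_reserve(0xef600000, 0xef600000, 0x00100000, TLB_I | TLB_G);
 *
 * before pmap_bootstrap() runs, as the KASSERTs above require, and later
 * recover the kernel VA of that window with ppc4xx_tlb_mapiodev().
 */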
/*
 * We should pass the ctx in from trap code.
 */
int
pmap_tlbmiss(vaddr_t va, int ctx)
{
	volatile u_int *pte;
	u_int tte;

	tlbmiss_ev.ev_count++;

	/*
	 * We will reserve 0 up to VM_MIN_KERNEL_ADDRESS for va == pa mappings.
	 * Physical RAM is expected to live in this range, care must be taken
	 * to not clobber 0 up to ${physmem} with device mappings in machdep
	 * code.
	 */
	if (ctx != KERNEL_PID || va >= VM_MIN_KERNEL_ADDRESS) {
		pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va);
		if (pte == NULL) {
			/* Map unmanaged addresses directly for kernel access */
			return 1;
		}
		tte = *pte;
		if (tte == 0)
			return 1;
	} else {
		/* Create a 16MB writable mapping. */
#ifdef PPC_4XX_NOCACHE
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I |TTE_WR;
#else
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
#endif
	}
	tlbhit_ev.ev_count++;
	ppc4xx_tlb_enter(ctx, va, tte);

	return 0;
}
/*
 * Flush all the entries matching a context from the TLB.
 */
static int
ctx_flush(int cnum)
{
	int i;

	/* We gotta steal this context */
	for (i = tlb_nreserved; i < NTLB; i++) {
		if (tlb_info[i].ti_ctx == cnum) {
			/* Can't steal ctx if it has a locked entry. */
			if (TLB_LOCKED(i)) {
				printf("ctx_flush: can't invalidate "
				    "locked mapping %d "
				    "for context %d\n", i, cnum);
				return (1);
			}
			if (i < tlb_nreserved)
				panic("TLB entry %d not locked", i);
			/* Invalidate particular TLB entry regardless of locked status */
			__asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
			tlb_info[i].ti_flags = 0;
		}
	}
	return (0);
}
/*
 * Allocate a context.  If necessary, steal one from someone else.
 *
 * The new context is flushed from the TLB before returning.
 */
int
ctx_alloc(struct pmap *pm)
{
	int s, cnum;
	static int next = MINCTX;

	if (pm == pmap_kernel()) {
		printf("ctx_alloc: kernel pmap!\n");
		return (0);
	}
	s = splvm();

	/* Find a likely context. */
	cnum = next;
	do {
		if ((++cnum) > NUMCTX)
			cnum = MINCTX;
	} while (ctxbusy[cnum] != NULL && cnum != next);

	/* Now clean it out */
oops:
	if (cnum < MINCTX)
		cnum = MINCTX; /* Never steal ctx 0 or 1 */
	if (ctx_flush(cnum)) {
		/* oops -- something's wired. */
		if ((++cnum) > NUMCTX)
			cnum = MINCTX;
		goto oops;
	}

	if (ctxbusy[cnum]) {
		/* We should identify this pmap and clear it */
		printf("Warning: stealing context %d\n", cnum);
		ctxbusy[cnum]->pm_ctx = 0;
	}
	ctxbusy[cnum] = pm;
	next = cnum;
	pm->pm_ctx = cnum;
	splx(s);
	return cnum;
}
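
/*
 * Context (TLB PID) allocation is round-robin over MINCTX..NUMCTX, with
 * "next" remembering where the previous search stopped.  If every context
 * is busy, one is stolen: its TLB entries are flushed by ctx_flush() and
 * the victim pmap's pm_ctx is cleared, so the victim will fault in a fresh
 * context the next time it runs.
 */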
/*
 * Give away a context.
 */
void
ctx_free(struct pmap *pm)
{
	int oldctx;

	oldctx = pm->pm_ctx;

	if (oldctx == 0)
		panic("ctx_free: freeing kernel context");

	if (ctxbusy[oldctx] == 0)
		printf("ctx_free: freeing free context %d\n", oldctx);
	if (ctxbusy[oldctx] != pm) {
		printf("ctx_free: freeing someone else's context\n "
		    "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
		    oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
	}

	/* We should verify it has not been stolen and reallocated... */
	ctxbusy[oldctx] = NULL;
}
#ifdef DEBUG
/*
 * Test ref/modify handling.
 */
void pmap_testout(void);
void
pmap_testout(void)
{
	vaddr_t va;
	volatile int *loc;
	int val = 0;
	paddr_t pa;
	struct vm_page *pg;
	int ref, mod;

	/* Allocate a page */
	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	loc = (int *)va;

	pmap_extract(pmap_kernel(), va, &pa);
	pg = PHYS_TO_VM_PAGE(pa);
	pmap_unwire(pmap_kernel(), va);

	pmap_kremove(va, PAGE_SIZE);
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_protect() */
	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_protect() */
	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_protect(): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_READ);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_NONE);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Unmap page */
	pmap_remove(pmap_kernel(), va, va+1);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Unmapped page: ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa, ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
	pmap_kenter_pa(va, pa, VM_PROT_ALL, 0);
	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED);
}
#endif