1 /* $NetBSD: pmap.c,v 1.68 2009/11/07 07:27:46 cegger Exp $ */
3 * Copyright (c) 2001 The NetBSD Foundation, Inc.
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
9 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
10 * of Kyma Systems LLC.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
35 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
36 * Copyright (C) 1995, 1996 TooLs GmbH.
37 * All rights reserved.
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by TooLs GmbH.
50 * 4. The name of TooLs GmbH may not be used to endorse or promote products
51 * derived from this software without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
58 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
59 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
60 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
61 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
62 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 #include <sys/cdefs.h>
66 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.68 2009/11/07 07:27:46 cegger Exp $");
68 #define PMAP_NOOPNAMES
70 #include "opt_ppcarch.h"
71 #include "opt_altivec.h"
72 #include "opt_multiprocessor.h"
75 #include <sys/param.h>
76 #include <sys/malloc.h>
79 #include <sys/queue.h>
80 #include <sys/device.h> /* for evcnt */
81 #include <sys/systm.h>
82 #include <sys/atomic.h>
86 #include <machine/pcb.h>
87 #include <machine/powerpc.h>
88 #include <powerpc/spr.h>
89 #include <powerpc/oea/sr_601.h>
90 #include <powerpc/bat.h>
91 #include <powerpc/stdarg.h>
97 volatile struct pteg
*pmap_pteg_table
;
98 unsigned int pmap_pteg_cnt
;
99 unsigned int pmap_pteg_mask
;
101 static paddr_t pmap_memlimit
= PMAP_MEMLIMIT
;
103 static paddr_t pmap_memlimit
= -PAGE_SIZE
; /* there is no limit */
106 struct pmap kernel_pmap_
;
107 unsigned int pmap_pages_stolen
;
108 u_long pmap_pte_valid
;
109 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
110 u_long pmap_pvo_enter_depth
;
111 u_long pmap_pvo_remove_depth
;
115 extern paddr_t msgbuf_paddr
;
118 static struct mem_region
*mem
, *avail
;
119 static u_int mem_cnt
, avail_cnt
;
121 #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
123 # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA64) && !defined(PPC_OEA64_BRIDGE)
124 # define PMAPNAME(name) pmap_##name
128 #if defined(PMAP_OEA64)
129 # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA) && !defined(PPC_OEA64_BRIDGE)
130 # define PMAPNAME(name) pmap_##name
134 #if defined(PMAP_OEA64_BRIDGE)
135 # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA) && !defined(PPC_OEA64)
136 # define PMAPNAME(name) pmap_##name
140 #if defined(PMAP_OEA)
141 #define _PRIxpte "lx"
143 #define _PRIxpte PRIx64
149 #if defined(PMAP_EXCLUDE_DECLS) && !defined(PMAPNAME)
150 #if defined(PMAP_OEA)
151 #define PMAPNAME(name) pmap32_##name
152 #elif defined(PMAP_OEA64)
153 #define PMAPNAME(name) pmap64_##name
154 #elif defined(PMAP_OEA64_BRIDGE)
155 #define PMAPNAME(name) pmap64bridge_##name
157 #error unknown variant for pmap
159 #endif /* PMAP_EXLCUDE_DECLS && !PMAPNAME */
161 #if defined(PMAPNAME)
162 #define STATIC static
163 #define pmap_pte_spill PMAPNAME(pte_spill)
164 #define pmap_real_memory PMAPNAME(real_memory)
165 #define pmap_init PMAPNAME(init)
166 #define pmap_virtual_space PMAPNAME(virtual_space)
167 #define pmap_create PMAPNAME(create)
168 #define pmap_reference PMAPNAME(reference)
169 #define pmap_destroy PMAPNAME(destroy)
170 #define pmap_copy PMAPNAME(copy)
171 #define pmap_update PMAPNAME(update)
172 #define pmap_enter PMAPNAME(enter)
173 #define pmap_remove PMAPNAME(remove)
174 #define pmap_kenter_pa PMAPNAME(kenter_pa)
175 #define pmap_kremove PMAPNAME(kremove)
176 #define pmap_extract PMAPNAME(extract)
177 #define pmap_protect PMAPNAME(protect)
178 #define pmap_unwire PMAPNAME(unwire)
179 #define pmap_page_protect PMAPNAME(page_protect)
180 #define pmap_query_bit PMAPNAME(query_bit)
181 #define pmap_clear_bit PMAPNAME(clear_bit)
183 #define pmap_activate PMAPNAME(activate)
184 #define pmap_deactivate PMAPNAME(deactivate)
186 #define pmap_pinit PMAPNAME(pinit)
187 #define pmap_procwr PMAPNAME(procwr)
189 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
190 #define pmap_pte_print PMAPNAME(pte_print)
191 #define pmap_pteg_check PMAPNAME(pteg_check)
192 #define pmap_print_mmruregs PMAPNAME(print_mmuregs)
193 #define pmap_print_pte PMAPNAME(print_pte)
194 #define pmap_pteg_dist PMAPNAME(pteg_dist)
196 #if defined(DEBUG) || defined(PMAPCHECK)
197 #define pmap_pvo_verify PMAPNAME(pvo_verify)
198 #define pmapcheck PMAPNAME(check)
200 #if defined(DEBUG) || defined(PMAPDEBUG)
201 #define pmapdebug PMAPNAME(debug)
203 #define pmap_steal_memory PMAPNAME(steal_memory)
204 #define pmap_bootstrap PMAPNAME(bootstrap)
206 #define STATIC /* nothing */
207 #endif /* PMAPNAME */
209 STATIC
int pmap_pte_spill(struct pmap
*, vaddr_t
, bool);
210 STATIC
void pmap_real_memory(paddr_t
*, psize_t
*);
211 STATIC
void pmap_init(void);
212 STATIC
void pmap_virtual_space(vaddr_t
*, vaddr_t
*);
213 STATIC pmap_t
pmap_create(void);
214 STATIC
void pmap_reference(pmap_t
);
215 STATIC
void pmap_destroy(pmap_t
);
216 STATIC
void pmap_copy(pmap_t
, pmap_t
, vaddr_t
, vsize_t
, vaddr_t
);
217 STATIC
void pmap_update(pmap_t
);
218 STATIC
int pmap_enter(pmap_t
, vaddr_t
, paddr_t
, vm_prot_t
, u_int
);
219 STATIC
void pmap_remove(pmap_t
, vaddr_t
, vaddr_t
);
220 STATIC
void pmap_kenter_pa(vaddr_t
, paddr_t
, vm_prot_t
, u_int
);
221 STATIC
void pmap_kremove(vaddr_t
, vsize_t
);
222 STATIC
bool pmap_extract(pmap_t
, vaddr_t
, paddr_t
*);
224 STATIC
void pmap_protect(pmap_t
, vaddr_t
, vaddr_t
, vm_prot_t
);
225 STATIC
void pmap_unwire(pmap_t
, vaddr_t
);
226 STATIC
void pmap_page_protect(struct vm_page
*, vm_prot_t
);
227 STATIC
bool pmap_query_bit(struct vm_page
*, int);
228 STATIC
bool pmap_clear_bit(struct vm_page
*, int);
230 STATIC
void pmap_activate(struct lwp
*);
231 STATIC
void pmap_deactivate(struct lwp
*);
233 STATIC
void pmap_pinit(pmap_t pm
);
234 STATIC
void pmap_procwr(struct proc
*, vaddr_t
, size_t);
236 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
237 STATIC
void pmap_pte_print(volatile struct pte
*);
238 STATIC
void pmap_pteg_check(void);
239 STATIC
void pmap_print_mmuregs(void);
240 STATIC
void pmap_print_pte(pmap_t
, vaddr_t
);
241 STATIC
void pmap_pteg_dist(void);
243 #if defined(DEBUG) || defined(PMAPCHECK)
244 STATIC
void pmap_pvo_verify(void);
246 STATIC vaddr_t
pmap_steal_memory(vsize_t
, vaddr_t
*, vaddr_t
*);
247 STATIC
void pmap_bootstrap(paddr_t
, paddr_t
);
250 const struct pmap_ops
PMAPNAME(ops
) = {
251 .pmapop_pte_spill
= pmap_pte_spill
,
252 .pmapop_real_memory
= pmap_real_memory
,
253 .pmapop_init
= pmap_init
,
254 .pmapop_virtual_space
= pmap_virtual_space
,
255 .pmapop_create
= pmap_create
,
256 .pmapop_reference
= pmap_reference
,
257 .pmapop_destroy
= pmap_destroy
,
258 .pmapop_copy
= pmap_copy
,
259 .pmapop_update
= pmap_update
,
260 .pmapop_enter
= pmap_enter
,
261 .pmapop_remove
= pmap_remove
,
262 .pmapop_kenter_pa
= pmap_kenter_pa
,
263 .pmapop_kremove
= pmap_kremove
,
264 .pmapop_extract
= pmap_extract
,
265 .pmapop_protect
= pmap_protect
,
266 .pmapop_unwire
= pmap_unwire
,
267 .pmapop_page_protect
= pmap_page_protect
,
268 .pmapop_query_bit
= pmap_query_bit
,
269 .pmapop_clear_bit
= pmap_clear_bit
,
270 .pmapop_activate
= pmap_activate
,
271 .pmapop_deactivate
= pmap_deactivate
,
272 .pmapop_pinit
= pmap_pinit
,
273 .pmapop_procwr
= pmap_procwr
,
274 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
275 .pmapop_pte_print
= pmap_pte_print
,
276 .pmapop_pteg_check
= pmap_pteg_check
,
277 .pmapop_print_mmuregs
= pmap_print_mmuregs
,
278 .pmapop_print_pte
= pmap_print_pte
,
279 .pmapop_pteg_dist
= pmap_pteg_dist
,
281 .pmapop_pte_print
= NULL
,
282 .pmapop_pteg_check
= NULL
,
283 .pmapop_print_mmuregs
= NULL
,
284 .pmapop_print_pte
= NULL
,
285 .pmapop_pteg_dist
= NULL
,
287 #if defined(DEBUG) || defined(PMAPCHECK)
288 .pmapop_pvo_verify
= pmap_pvo_verify
,
290 .pmapop_pvo_verify
= NULL
,
292 .pmapop_steal_memory
= pmap_steal_memory
,
293 .pmapop_bootstrap
= pmap_bootstrap
,
295 #endif /* !PMAPNAME */
298 * The following structure is aligned to 32 bytes
301 LIST_ENTRY(pvo_entry
) pvo_vlink
; /* Link to common virt page */
302 TAILQ_ENTRY(pvo_entry
) pvo_olink
; /* Link to overflow entry */
303 struct pte pvo_pte
; /* Prebuilt PTE */
304 pmap_t pvo_pmap
; /* ptr to owning pmap */
305 vaddr_t pvo_vaddr
; /* VA of entry */
306 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */
307 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */
308 #define PVO_WIRED 0x0010 /* PVO entry is wired */
309 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */
310 #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */
311 #define PVO_WIRED_P(pvo) ((pvo)->pvo_vaddr & PVO_WIRED)
312 #define PVO_MANAGED_P(pvo) ((pvo)->pvo_vaddr & PVO_MANAGED)
313 #define PVO_EXECUTABLE_P(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
314 #define PVO_ENTER_INSERT 0 /* PVO has been removed */
315 #define PVO_SPILL_UNSET 1 /* PVO has been evicted */
316 #define PVO_SPILL_SET 2 /* PVO has been spilled */
317 #define PVO_SPILL_INSERT 3 /* PVO has been inserted */
318 #define PVO_PMAP_PAGE_PROTECT 4 /* PVO has changed */
319 #define PVO_PMAP_PROTECT 5 /* PVO has changed */
320 #define PVO_REMOVE 6 /* PVO has been removed */
321 #define PVO_WHERE_MASK 15
322 #define PVO_WHERE_SHFT 8
323 } __attribute__ ((aligned (32)));
324 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
325 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
326 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
327 #define PVO_PTEGIDX_CLR(pvo) \
328 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
329 #define PVO_PTEGIDX_SET(pvo,i) \
330 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
331 #define PVO_WHERE(pvo,w) \
332 ((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
333 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
335 TAILQ_HEAD(pvo_tqhead
, pvo_entry
);
336 struct pvo_tqhead
*pmap_pvo_table
; /* pvo entries by ptegroup index */
337 static struct pvo_head pmap_pvo_kunmanaged
= LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged
); /* list of unmanaged pages */
338 static struct pvo_head pmap_pvo_unmanaged
= LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged
); /* list of unmanaged pages */
340 struct pool pmap_pool
; /* pool for pmap structures */
341 struct pool pmap_upvo_pool
; /* pool for pvo entries for unmanaged pages */
342 struct pool pmap_mpvo_pool
; /* pool for pvo entries for managed pages */
345 * We keep a cache of unmanaged pages to be used for pvo entries for
349 SIMPLEQ_ENTRY(pvo_page
) pvop_link
;
351 SIMPLEQ_HEAD(pvop_head
, pvo_page
);
352 static struct pvop_head pmap_upvop_head
= SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head
);
353 static struct pvop_head pmap_mpvop_head
= SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head
);
354 u_long pmap_upvop_free
;
355 u_long pmap_upvop_maxfree
;
356 u_long pmap_mpvop_free
;
357 u_long pmap_mpvop_maxfree
;
359 static void *pmap_pool_ualloc(struct pool
*, int);
360 static void *pmap_pool_malloc(struct pool
*, int);
362 static void pmap_pool_ufree(struct pool
*, void *);
363 static void pmap_pool_mfree(struct pool
*, void *);
365 static struct pool_allocator pmap_pool_mallocator
= {
366 .pa_alloc
= pmap_pool_malloc
,
367 .pa_free
= pmap_pool_mfree
,
371 static struct pool_allocator pmap_pool_uallocator
= {
372 .pa_alloc
= pmap_pool_ualloc
,
373 .pa_free
= pmap_pool_ufree
,
377 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
378 void pmap_pte_print(volatile struct pte
*);
379 void pmap_pteg_check(void);
380 void pmap_pteg_dist(void);
381 void pmap_print_pte(pmap_t
, vaddr_t
);
382 void pmap_print_mmuregs(void);
385 #if defined(DEBUG) || defined(PMAPCHECK)
391 void pmap_pvo_verify(void);
392 static void pmap_pvo_check(const struct pvo_entry
*);
393 #define PMAP_PVO_CHECK(pvo) \
396 pmap_pvo_check(pvo); \
399 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0)
401 static int pmap_pte_insert(int, struct pte
*);
402 static int pmap_pvo_enter(pmap_t
, struct pool
*, struct pvo_head
*,
403 vaddr_t
, paddr_t
, register_t
, int);
404 static void pmap_pvo_remove(struct pvo_entry
*, int, struct pvo_head
*);
405 static void pmap_pvo_free(struct pvo_entry
*);
406 static void pmap_pvo_free_list(struct pvo_head
*);
407 static struct pvo_entry
*pmap_pvo_find_va(pmap_t
, vaddr_t
, int *);
408 static volatile struct pte
*pmap_pvo_to_pte(const struct pvo_entry
*, int);
409 static struct pvo_entry
*pmap_pvo_reclaim(struct pmap
*);
410 static void pvo_set_exec(struct pvo_entry
*);
411 static void pvo_clear_exec(struct pvo_entry
*);
413 static void tlbia(void);
415 static void pmap_release(pmap_t
);
416 static paddr_t
pmap_boot_find_memory(psize_t
, psize_t
, int);
418 static uint32_t pmap_pvo_reclaim_nextidx
;
420 static int pmap_pvo_reclaim_debugctr
;
423 #define VSID_NBPW (sizeof(uint32_t) * 8)
424 static uint32_t pmap_vsid_bitmap
[NPMAPS
/ VSID_NBPW
];
426 static int pmap_initialized
;
428 #if defined(DEBUG) || defined(PMAPDEBUG)
429 #define PMAPDEBUG_BOOT 0x0001
430 #define PMAPDEBUG_PTE 0x0002
431 #define PMAPDEBUG_EXEC 0x0008
432 #define PMAPDEBUG_PVOENTER 0x0010
433 #define PMAPDEBUG_PVOREMOVE 0x0020
434 #define PMAPDEBUG_ACTIVATE 0x0100
435 #define PMAPDEBUG_CREATE 0x0200
436 #define PMAPDEBUG_ENTER 0x1000
437 #define PMAPDEBUG_KENTER 0x2000
438 #define PMAPDEBUG_KREMOVE 0x4000
439 #define PMAPDEBUG_REMOVE 0x8000
441 unsigned int pmapdebug
= 0;
443 # define DPRINTF(x) printf x
444 # define DPRINTFN(n, x) if (pmapdebug & PMAPDEBUG_ ## n) printf x
447 # define DPRINTFN(n, x)
455 extern struct evcnt pmap_evcnt_mappings
;
456 extern struct evcnt pmap_evcnt_unmappings
;
458 extern struct evcnt pmap_evcnt_kernel_mappings
;
459 extern struct evcnt pmap_evcnt_kernel_unmappings
;
461 extern struct evcnt pmap_evcnt_mappings_replaced
;
463 extern struct evcnt pmap_evcnt_exec_mappings
;
464 extern struct evcnt pmap_evcnt_exec_cached
;
466 extern struct evcnt pmap_evcnt_exec_synced
;
467 extern struct evcnt pmap_evcnt_exec_synced_clear_modify
;
468 extern struct evcnt pmap_evcnt_exec_synced_pvo_remove
;
470 extern struct evcnt pmap_evcnt_exec_uncached_page_protect
;
471 extern struct evcnt pmap_evcnt_exec_uncached_clear_modify
;
472 extern struct evcnt pmap_evcnt_exec_uncached_zero_page
;
473 extern struct evcnt pmap_evcnt_exec_uncached_copy_page
;
474 extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove
;
476 extern struct evcnt pmap_evcnt_updates
;
477 extern struct evcnt pmap_evcnt_collects
;
478 extern struct evcnt pmap_evcnt_copies
;
480 extern struct evcnt pmap_evcnt_ptes_spilled
;
481 extern struct evcnt pmap_evcnt_ptes_unspilled
;
482 extern struct evcnt pmap_evcnt_ptes_evicted
;
484 extern struct evcnt pmap_evcnt_ptes_primary
[8];
485 extern struct evcnt pmap_evcnt_ptes_secondary
[8];
486 extern struct evcnt pmap_evcnt_ptes_removed
;
487 extern struct evcnt pmap_evcnt_ptes_changed
;
488 extern struct evcnt pmap_evcnt_pvos_reclaimed
;
489 extern struct evcnt pmap_evcnt_pvos_failed
;
491 extern struct evcnt pmap_evcnt_zeroed_pages
;
492 extern struct evcnt pmap_evcnt_copied_pages
;
493 extern struct evcnt pmap_evcnt_idlezeroed_pages
;
495 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++)
496 #define PMAPCOUNT2(ev) ((ev).ev_count++)
498 #define PMAPCOUNT(ev) ((void) 0)
499 #define PMAPCOUNT2(ev) ((void) 0)
502 #define TLBIE(va) __asm volatile("tlbie %0" :: "r"(va))
504 /* XXXSL: this needs to be moved to assembler */
505 #define TLBIEL(va) __asm __volatile("tlbie %0" :: "r"(va))
507 #define TLBSYNC() __asm volatile("tlbsync")
508 #define SYNC() __asm volatile("sync")
509 #define EIEIO() __asm volatile("eieio")
510 #define DCBST(va) __asm __volatile("dcbst 0,%0" :: "r"(va))
511 #define MFMSR() mfmsr()
512 #define MTMSR(psl) mtmsr(psl)
513 #define MFPVR() mfpvr()
514 #define MFSRIN(va) mfsrin(va)
515 #define MFTB() mfrtcltbl()
517 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
518 static inline register_t
522 __asm
volatile ("mfsrin %0,%1" : "=r"(sr
) : "r"(va
));
527 #if defined (PMAP_OEA64_BRIDGE)
528 extern void mfmsr64 (register64_t
*result
);
529 #endif /* PMAP_OEA64_BRIDGE */
531 #define PMAP_LOCK() KERNEL_LOCK(1, NULL)
532 #define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL)
534 static inline register_t
535 pmap_interrupts_off(void)
537 register_t msr
= MFMSR();
539 MTMSR(msr
& ~PSL_EE
);
544 pmap_interrupts_restore(register_t msr
)
550 static inline u_int32_t
554 if ((MFPVR() >> 16) == MPC601
)
555 return (mfrtcl() >> 7);
562 * These small routines may have to be replaced,
563 * if/when we support processors other that the 604.
572 #if defined(PMAP_OEA)
574 * Why not use "tlbia"? Because not all processors implement it.
576 * This needs to be a per-CPU callback to do the appropriate thing
579 for (i
= 0; i
< (char *)0x00040000; i
+= 0x00001000) {
584 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
585 /* This is specifically for the 970, 970UM v1.6 pp. 140. */
586 for (i
= 0; i
<= (char *)0xFF000; i
+= 0x00001000) {
596 static inline register_t
597 va_to_vsid(const struct pmap
*pm
, vaddr_t addr
)
599 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
600 return (pm
->pm_sr
[addr
>> ADDR_SR_SHFT
] & SR_VSID
) >> SR_VSID_SHFT
;
601 #else /* PMAP_OEA64 */
603 const struct ste
*ste
;
607 hash
= (addr
>> ADDR_ESID_SHFT
) & ADDR_ESID_HASH
;
610 * Try the primary group first
612 ste
= pm
->pm_stes
[hash
].stes
;
613 for (i
= 0; i
< 8; i
++, ste
++) {
614 if (ste
->ste_hi
& STE_V
) &&
615 (addr
& ~(ADDR_POFF
|ADDR_PIDX
)) == (ste
->ste_hi
& STE_ESID
))
620 * Then the secondary group.
622 ste
= pm
->pm_stes
[hash
^ ADDR_ESID_HASH
].stes
;
623 for (i
= 0; i
< 8; i
++, ste
++) {
624 if (ste
->ste_hi
& STE_V
) &&
625 (addr
& ~(ADDR_POFF
|ADDR_PIDX
)) == (ste
->ste_hi
& STE_ESID
))
632 * Rather than searching the STE groups for the VSID, we know
633 * how we generate that from the ESID and so do that.
635 return VSID_MAKE(addr
>> ADDR_SR_SHFT
, pm
->pm_vsid
) >> SR_VSID_SHFT
;
637 #endif /* PMAP_OEA */
640 static inline register_t
641 va_to_pteg(const struct pmap
*pm
, vaddr_t addr
)
645 hash
= va_to_vsid(pm
, addr
) ^ ((addr
& ADDR_PIDX
) >> ADDR_PIDX_SHFT
);
646 return hash
& pmap_pteg_mask
;
649 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
651 * Given a PTE in the page table, calculate the VADDR that hashes to it.
652 * The only bit of magic is that the top 4 bits of the address doesn't
653 * technically exist in the PTE. But we know we reserved 4 bits of the
654 * VSID for it so that's how we get it.
657 pmap_pte_to_va(volatile const struct pte
*pt
)
660 uintptr_t ptaddr
= (uintptr_t) pt
;
662 if (pt
->pte_hi
& PTE_HID
)
663 ptaddr
^= (pmap_pteg_mask
* sizeof(struct pteg
));
665 /* PPC Bits 10-19 PPC64 Bits 42-51 */
666 #if defined(PMAP_OEA)
667 va
= ((pt
->pte_hi
>> PTE_VSID_SHFT
) ^ (ptaddr
/ sizeof(struct pteg
))) & 0x3ff;
668 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
669 va
= ((pt
->pte_hi
>> PTE_VSID_SHFT
) ^ (ptaddr
/ sizeof(struct pteg
))) & 0x7ff;
671 va
<<= ADDR_PIDX_SHFT
;
673 /* PPC Bits 4-9 PPC64 Bits 36-41 */
674 va
|= (pt
->pte_hi
& PTE_API
) << ADDR_API_SHFT
;
676 #if defined(PMAP_OEA64)
677 /* PPC63 Bits 0-35 */
678 /* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
679 #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
681 va
|= VSID_TO_SR(pt
->pte_hi
>> PTE_VSID_SHFT
) << ADDR_SR_SHFT
;
688 static inline struct pvo_head
*
689 pa_to_pvoh(paddr_t pa
, struct vm_page
**pg_p
)
693 pg
= PHYS_TO_VM_PAGE(pa
);
697 return &pmap_pvo_unmanaged
;
698 return &pg
->mdpage
.mdpg_pvoh
;
701 static inline struct pvo_head
*
702 vm_page_to_pvoh(struct vm_page
*pg
)
704 return &pg
->mdpage
.mdpg_pvoh
;
709 pmap_attr_clear(struct vm_page
*pg
, int ptebit
)
711 pg
->mdpage
.mdpg_attrs
&= ~ptebit
;
715 pmap_attr_fetch(struct vm_page
*pg
)
717 return pg
->mdpage
.mdpg_attrs
;
721 pmap_attr_save(struct vm_page
*pg
, int ptebit
)
723 pg
->mdpage
.mdpg_attrs
|= ptebit
;
727 pmap_pte_compare(const volatile struct pte
*pt
, const struct pte
*pvo_pt
)
729 if (pt
->pte_hi
== pvo_pt
->pte_hi
731 && ((pt
->pte_lo
^ pvo_pt
->pte_lo
) &
732 ~(PTE_REF
|PTE_CHG
)) == 0
740 pmap_pte_create(struct pte
*pt
, const struct pmap
*pm
, vaddr_t va
, register_t pte_lo
)
743 * Construct the PTE. Default to IMB initially. Valid bit
744 * only gets set when the real pte is set in memory.
746 * Note: Don't set the valid bit for correct operation of tlb update.
748 #if defined(PMAP_OEA)
749 pt
->pte_hi
= (va_to_vsid(pm
, va
) << PTE_VSID_SHFT
)
750 | (((va
& ADDR_PIDX
) >> (ADDR_API_SHFT
- PTE_API_SHFT
)) & PTE_API
);
752 #elif defined (PMAP_OEA64_BRIDGE)
753 pt
->pte_hi
= ((u_int64_t
)va_to_vsid(pm
, va
) << PTE_VSID_SHFT
)
754 | (((va
& ADDR_PIDX
) >> (ADDR_API_SHFT
- PTE_API_SHFT
)) & PTE_API
);
755 pt
->pte_lo
= (u_int64_t
) pte_lo
;
756 #elif defined (PMAP_OEA64)
757 #error PMAP_OEA64 not supported
758 #endif /* PMAP_OEA */
762 pmap_pte_synch(volatile struct pte
*pt
, struct pte
*pvo_pt
)
764 pvo_pt
->pte_lo
|= pt
->pte_lo
& (PTE_REF
|PTE_CHG
);
768 pmap_pte_clear(volatile struct pte
*pt
, vaddr_t va
, int ptebit
)
771 * As shown in Section 7.6.3.2.3
773 pt
->pte_lo
&= ~ptebit
;
779 #ifdef MULTIPROCESSOR
785 pmap_pte_set(volatile struct pte
*pt
, struct pte
*pvo_pt
)
787 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
788 if (pvo_pt
->pte_hi
& PTE_VALID
)
789 panic("pte_set: setting an already valid pte %p", pvo_pt
);
791 pvo_pt
->pte_hi
|= PTE_VALID
;
794 * Update the PTE as defined in section 7.6.3.1
795 * Note that the REF/CHG bits are from pvo_pt and thus should
796 * have been saved so this routine can restore them (if desired).
798 pt
->pte_lo
= pvo_pt
->pte_lo
;
800 pt
->pte_hi
= pvo_pt
->pte_hi
;
803 #ifdef MULTIPROCESSOR
810 pmap_pte_unset(volatile struct pte
*pt
, struct pte
*pvo_pt
, vaddr_t va
)
812 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
813 if ((pvo_pt
->pte_hi
& PTE_VALID
) == 0)
814 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt
, pt
);
815 if ((pt
->pte_hi
& PTE_VALID
) == 0)
816 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt
, pt
);
819 pvo_pt
->pte_hi
&= ~PTE_VALID
;
821 * Force the ref & chg bits back into the PTEs.
825 * Invalidate the pte ... (Section 7.6.3.3)
827 pt
->pte_hi
&= ~PTE_VALID
;
835 * Save the ref & chg bits ...
837 pmap_pte_synch(pt
, pvo_pt
);
842 pmap_pte_change(volatile struct pte
*pt
, struct pte
*pvo_pt
, vaddr_t va
)
847 pmap_pte_unset(pt
, pvo_pt
, va
);
848 pmap_pte_set(pt
, pvo_pt
);
852 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
853 * (either primary or secondary location).
855 * Note: both the destination and source PTEs must not have PTE_VALID set.
859 pmap_pte_insert(int ptegidx
, struct pte
*pvo_pt
)
861 volatile struct pte
*pt
;
865 DPRINTFN(PTE
, ("pmap_pte_insert: idx %#x, pte %#" _PRIxpte
" %#" _PRIxpte
"\n",
866 ptegidx
, pvo_pt
->pte_hi
, pvo_pt
->pte_lo
));
869 * First try primary hash.
871 for (pt
= pmap_pteg_table
[ptegidx
].pt
, i
= 0; i
< 8; i
++, pt
++) {
872 if ((pt
->pte_hi
& PTE_VALID
) == 0) {
873 pvo_pt
->pte_hi
&= ~PTE_HID
;
874 pmap_pte_set(pt
, pvo_pt
);
880 * Now try secondary hash.
882 ptegidx
^= pmap_pteg_mask
;
883 for (pt
= pmap_pteg_table
[ptegidx
].pt
, i
= 0; i
< 8; i
++, pt
++) {
884 if ((pt
->pte_hi
& PTE_VALID
) == 0) {
885 pvo_pt
->pte_hi
|= PTE_HID
;
886 pmap_pte_set(pt
, pvo_pt
);
896 * Tries to spill a page table entry from the overflow area.
897 * This runs in either real mode (if dealing with a exception spill)
898 * or virtual mode when dealing with manually spilling one of the
899 * kernel's pte entries. In either case, interrupts are already
904 pmap_pte_spill(struct pmap
*pm
, vaddr_t addr
, bool exec
)
906 struct pvo_entry
*source_pvo
, *victim_pvo
, *next_pvo
;
907 struct pvo_entry
*pvo
;
908 /* XXX: gcc -- vpvoh is always set at either *1* or *2* */
909 struct pvo_tqhead
*pvoh
, *vpvoh
= NULL
;
911 volatile struct pteg
*pteg
;
912 volatile struct pte
*pt
;
916 ptegidx
= va_to_pteg(pm
, addr
);
919 * Have to substitute some entry. Use the primary hash for this.
920 * Use low bits of timebase as random generator. Make sure we are
921 * not picking a kernel pte for replacement.
923 pteg
= &pmap_pteg_table
[ptegidx
];
925 for (j
= 0; j
< 8; j
++) {
927 if ((pt
->pte_hi
& PTE_VALID
) == 0)
929 if (VSID_TO_HASH((pt
->pte_hi
& PTE_VSID
) >> PTE_VSID_SHFT
)
938 pvoh
= &pmap_pvo_table
[ptegidx
];
939 TAILQ_FOREACH(pvo
, pvoh
, pvo_olink
) {
942 * We need to find pvo entry for this address...
944 PMAP_PVO_CHECK(pvo
); /* sanity check */
947 * If we haven't found the source and we come to a PVO with
948 * a valid PTE, then we know we can't find it because all
949 * evicted PVOs always are first in the list.
951 if (source_pvo
== NULL
&& (pvo
->pvo_pte
.pte_hi
& PTE_VALID
))
953 if (source_pvo
== NULL
&& pm
== pvo
->pvo_pmap
&&
954 addr
== PVO_VADDR(pvo
)) {
957 * Now we have found the entry to be spilled into the
958 * pteg. Attempt to insert it into the page table.
960 j
= pmap_pte_insert(ptegidx
, &pvo
->pvo_pte
);
962 PVO_PTEGIDX_SET(pvo
, j
);
963 PMAP_PVO_CHECK(pvo
); /* sanity check */
964 PVO_WHERE(pvo
, SPILL_INSERT
);
965 pvo
->pvo_pmap
->pm_evictions
--;
966 PMAPCOUNT(ptes_spilled
);
967 PMAPCOUNT2(((pvo
->pvo_pte
.pte_hi
& PTE_HID
)
968 ? pmap_evcnt_ptes_secondary
969 : pmap_evcnt_ptes_primary
)[j
]);
972 * Since we keep the evicted entries at the
973 * from of the PVO list, we need move this
974 * (now resident) PVO after the evicted
977 next_pvo
= TAILQ_NEXT(pvo
, pvo_olink
);
980 * If we don't have to move (either we were the
981 * last entry or the next entry was valid),
982 * don't change our position. Otherwise
983 * move ourselves to the tail of the queue.
985 if (next_pvo
!= NULL
&&
986 !(next_pvo
->pvo_pte
.pte_hi
& PTE_VALID
)) {
987 TAILQ_REMOVE(pvoh
, pvo
, pvo_olink
);
988 TAILQ_INSERT_TAIL(pvoh
, pvo
, pvo_olink
);
994 if (exec
&& !PVO_EXECUTABLE_P(source_pvo
)) {
997 if (victim_pvo
!= NULL
)
1002 * We also need the pvo entry of the victim we are replacing
1003 * so save the R & C bits of the PTE.
1005 if ((pt
->pte_hi
& PTE_HID
) == 0 && victim_pvo
== NULL
&&
1006 pmap_pte_compare(pt
, &pvo
->pvo_pte
)) {
1007 vpvoh
= pvoh
; /* *1* */
1009 if (source_pvo
!= NULL
)
1014 if (source_pvo
== NULL
) {
1015 PMAPCOUNT(ptes_unspilled
);
1020 if (victim_pvo
== NULL
) {
1021 if ((pt
->pte_hi
& PTE_HID
) == 0)
1022 panic("pmap_pte_spill: victim p-pte (%p) has "
1023 "no pvo entry!", pt
);
1026 * If this is a secondary PTE, we need to search
1027 * its primary pvo bucket for the matching PVO.
1029 vpvoh
= &pmap_pvo_table
[ptegidx
^ pmap_pteg_mask
]; /* *2* */
1030 TAILQ_FOREACH(pvo
, vpvoh
, pvo_olink
) {
1031 PMAP_PVO_CHECK(pvo
); /* sanity check */
1034 * We also need the pvo entry of the victim we are
1035 * replacing so save the R & C bits of the PTE.
1037 if (pmap_pte_compare(pt
, &pvo
->pvo_pte
)) {
1042 if (victim_pvo
== NULL
)
1043 panic("pmap_pte_spill: victim s-pte (%p) has "
1044 "no pvo entry!", pt
);
1048 * The victim should be not be a kernel PVO/PTE entry.
1050 KASSERT(victim_pvo
->pvo_pmap
!= pmap_kernel());
1051 KASSERT(PVO_PTEGIDX_ISSET(victim_pvo
));
1052 KASSERT(PVO_PTEGIDX_GET(victim_pvo
) == i
);
1055 * We are invalidating the TLB entry for the EA for the
1056 * we are replacing even though its valid; If we don't
1057 * we lose any ref/chg bit changes contained in the TLB
1060 source_pvo
->pvo_pte
.pte_hi
&= ~PTE_HID
;
1063 * To enforce the PVO list ordering constraint that all
1064 * evicted entries should come before all valid entries,
1065 * move the source PVO to the tail of its list and the
1066 * victim PVO to the head of its list (which might not be
1067 * the same list, if the victim was using the secondary hash).
1069 TAILQ_REMOVE(pvoh
, source_pvo
, pvo_olink
);
1070 TAILQ_INSERT_TAIL(pvoh
, source_pvo
, pvo_olink
);
1071 TAILQ_REMOVE(vpvoh
, victim_pvo
, pvo_olink
);
1072 TAILQ_INSERT_HEAD(vpvoh
, victim_pvo
, pvo_olink
);
1073 pmap_pte_unset(pt
, &victim_pvo
->pvo_pte
, victim_pvo
->pvo_vaddr
);
1074 pmap_pte_set(pt
, &source_pvo
->pvo_pte
);
1075 victim_pvo
->pvo_pmap
->pm_evictions
++;
1076 source_pvo
->pvo_pmap
->pm_evictions
--;
1077 PVO_WHERE(victim_pvo
, SPILL_UNSET
);
1078 PVO_WHERE(source_pvo
, SPILL_SET
);
1080 PVO_PTEGIDX_CLR(victim_pvo
);
1081 PVO_PTEGIDX_SET(source_pvo
, i
);
1082 PMAPCOUNT2(pmap_evcnt_ptes_primary
[i
]);
1083 PMAPCOUNT(ptes_spilled
);
1084 PMAPCOUNT(ptes_evicted
);
1085 PMAPCOUNT(ptes_removed
);
1087 PMAP_PVO_CHECK(victim_pvo
);
1088 PMAP_PVO_CHECK(source_pvo
);
1095 * Restrict given range to physical memory
1098 pmap_real_memory(paddr_t
*start
, psize_t
*size
)
1100 struct mem_region
*mp
;
1102 for (mp
= mem
; mp
->size
; mp
++) {
1103 if (*start
+ *size
> mp
->start
1104 && *start
< mp
->start
+ mp
->size
) {
1105 if (*start
< mp
->start
) {
1106 *size
-= mp
->start
- *start
;
1109 if (*start
+ *size
> mp
->start
+ mp
->size
)
1110 *size
= mp
->start
+ mp
->size
- *start
;
1118 * Initialize anything else for pmap handling.
1119 * Called during vm_init().
1124 pool_init(&pmap_mpvo_pool
, sizeof(struct pvo_entry
),
1125 sizeof(struct pvo_entry
), 0, 0, "pmap_mpvopl",
1126 &pmap_pool_mallocator
, IPL_NONE
);
1128 pool_setlowat(&pmap_mpvo_pool
, 1008);
1130 pmap_initialized
= 1;
1135 * How much virtual space does the kernel get?
1138 pmap_virtual_space(vaddr_t
*start
, vaddr_t
*end
)
1141 * For now, reserve one segment (minus some overhead) for kernel
1144 *start
= VM_MIN_KERNEL_ADDRESS
;
1145 *end
= VM_MAX_KERNEL_ADDRESS
;
1149 * Allocate, initialize, and return a new physical map.
1156 pm
= pool_get(&pmap_pool
, PR_WAITOK
);
1157 memset((void *)pm
, 0, sizeof *pm
);
1160 DPRINTFN(CREATE
,("pmap_create: pm %p:\n"
1161 "\t%#" _PRIsr
" %#" _PRIsr
" %#" _PRIsr
" %#" _PRIsr
1162 " %#" _PRIsr
" %#" _PRIsr
" %#" _PRIsr
" %#" _PRIsr
"\n"
1163 "\t%#" _PRIsr
" %#" _PRIsr
" %#" _PRIsr
" %#" _PRIsr
1164 " %#" _PRIsr
" %#" _PRIsr
" %#" _PRIsr
" %#" _PRIsr
"\n",
1166 pm
->pm_sr
[0], pm
->pm_sr
[1],
1167 pm
->pm_sr
[2], pm
->pm_sr
[3],
1168 pm
->pm_sr
[4], pm
->pm_sr
[5],
1169 pm
->pm_sr
[6], pm
->pm_sr
[7],
1170 pm
->pm_sr
[8], pm
->pm_sr
[9],
1171 pm
->pm_sr
[10], pm
->pm_sr
[11],
1172 pm
->pm_sr
[12], pm
->pm_sr
[13],
1173 pm
->pm_sr
[14], pm
->pm_sr
[15]));
1178 * Initialize a preallocated and zeroed pmap structure.
1181 pmap_pinit(pmap_t pm
)
1183 register_t entropy
= MFTB();
1188 * Allocate some segment registers for this pmap.
1192 for (i
= 0; i
< NPMAPS
; i
+= VSID_NBPW
) {
1193 static register_t pmap_vsidcontext
;
1197 /* Create a new value by multiplying by a prime adding in
1198 * entropy from the timebase register. This is to make the
1199 * VSID more random so that the PT Hash function collides
1200 * less often. (note that the prime causes gcc to do shifts
1201 * instead of a multiply)
1203 pmap_vsidcontext
= (pmap_vsidcontext
* 0x1105) + entropy
;
1204 hash
= pmap_vsidcontext
& (NPMAPS
- 1);
1205 if (hash
== 0) { /* 0 is special, avoid it */
1206 entropy
+= 0xbadf00d;
1210 mask
= 1L << (hash
& (VSID_NBPW
-1));
1211 hash
= pmap_vsidcontext
;
1212 if (pmap_vsid_bitmap
[n
] & mask
) { /* collision? */
1213 /* anything free in this bucket? */
1214 if (~pmap_vsid_bitmap
[n
] == 0) {
1215 entropy
= hash
^ (hash
>> 16);
1218 i
= ffs(~pmap_vsid_bitmap
[n
]) - 1;
1220 hash
&= ~(VSID_NBPW
-1);
1223 hash
&= PTE_VSID
>> PTE_VSID_SHFT
;
1224 pmap_vsid_bitmap
[n
] |= mask
;
1226 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1227 for (i
= 0; i
< 16; i
++)
1228 pm
->pm_sr
[i
] = VSID_MAKE(i
, hash
) | SR_PRKEY
|
1235 panic("pmap_pinit: out of segments");
1239 * Add a reference to the given pmap.
1242 pmap_reference(pmap_t pm
)
1244 atomic_inc_uint(&pm
->pm_refs
);
1248 * Retire the given pmap from service.
1249 * Should only be called if the map contains no valid mappings.
1252 pmap_destroy(pmap_t pm
)
1254 if (atomic_dec_uint_nv(&pm
->pm_refs
) == 0) {
1256 pool_put(&pmap_pool
, pm
);
1261 * Release any resources held by the given physical map.
1262 * Called when a pmap initialized by pmap_pinit is being released.
1265 pmap_release(pmap_t pm
)
1269 KASSERT(pm
->pm_stats
.resident_count
== 0);
1270 KASSERT(pm
->pm_stats
.wired_count
== 0);
1273 if (pm
->pm_sr
[0] == 0)
1274 panic("pmap_release");
1275 idx
= pm
->pm_vsid
& (NPMAPS
-1);
1276 mask
= 1 << (idx
% VSID_NBPW
);
1279 KASSERT(pmap_vsid_bitmap
[idx
] & mask
);
1280 pmap_vsid_bitmap
[idx
] &= ~mask
;
1285 * Copy the range specified by src_addr/len
1286 * from the source map to the range dst_addr/len
1287 * in the destination map.
1289 * This routine is only advisory and need not do anything.
1292 pmap_copy(pmap_t dst_pmap
, pmap_t src_pmap
, vaddr_t dst_addr
,
1293 vsize_t len
, vaddr_t src_addr
)
1299 * Require that all active physical maps contain no
1300 * incorrect entries NOW.
1303 pmap_update(struct pmap
*pmap
)
1310 pmap_pvo_pte_index(const struct pvo_entry
*pvo
, int ptegidx
)
1314 * We can find the actual pte entry without searching by
1315 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
1316 * and by noticing the HID bit.
1318 pteidx
= ptegidx
* 8 + PVO_PTEGIDX_GET(pvo
);
1319 if (pvo
->pvo_pte
.pte_hi
& PTE_HID
)
1320 pteidx
^= pmap_pteg_mask
* 8;
1324 volatile struct pte
*
1325 pmap_pvo_to_pte(const struct pvo_entry
*pvo
, int pteidx
)
1327 volatile struct pte
*pt
;
1329 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1330 if ((pvo
->pvo_pte
.pte_hi
& PTE_VALID
) == 0)
1335 * If we haven't been supplied the ptegidx, calculate it.
1339 ptegidx
= va_to_pteg(pvo
->pvo_pmap
, pvo
->pvo_vaddr
);
1340 pteidx
= pmap_pvo_pte_index(pvo
, ptegidx
);
1343 pt
= &pmap_pteg_table
[pteidx
>> 3].pt
[pteidx
& 7];
1345 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1348 if ((pvo
->pvo_pte
.pte_hi
& PTE_VALID
) && !PVO_PTEGIDX_ISSET(pvo
)) {
1349 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1350 "pvo but no valid pte index", pvo
);
1352 if ((pvo
->pvo_pte
.pte_hi
& PTE_VALID
) == 0 && PVO_PTEGIDX_ISSET(pvo
)) {
1353 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
1354 "pvo but no valid pte", pvo
);
1357 if ((pt
->pte_hi
^ (pvo
->pvo_pte
.pte_hi
& ~PTE_VALID
)) == PTE_VALID
) {
1358 if ((pvo
->pvo_pte
.pte_hi
& PTE_VALID
) == 0) {
1359 #if defined(DEBUG) || defined(PMAPCHECK)
1362 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1363 "pmap_pteg_table %p but invalid in pvo",
1366 if (((pt
->pte_lo
^ pvo
->pvo_pte
.pte_lo
) & ~(PTE_CHG
|PTE_REF
)) != 0) {
1367 #if defined(DEBUG) || defined(PMAPCHECK)
1370 panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
1371 "not match pte %p in pmap_pteg_table",
1377 if (pvo
->pvo_pte
.pte_hi
& PTE_VALID
) {
1378 #if defined(DEBUG) || defined(PMAPCHECK)
1381 panic("pmap_pvo_to_pte: pvo %p: has nomatching pte %p in "
1382 "pmap_pteg_table but valid in pvo", pvo
, pt
);
1385 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1389 pmap_pvo_find_va(pmap_t pm
, vaddr_t va
, int *pteidx_p
)
1391 struct pvo_entry
*pvo
;
1395 ptegidx
= va_to_pteg(pm
, va
);
1397 TAILQ_FOREACH(pvo
, &pmap_pvo_table
[ptegidx
], pvo_olink
) {
1398 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1399 if ((uintptr_t) pvo
>= SEGMENT_LENGTH
)
1400 panic("pmap_pvo_find_va: invalid pvo %p on "
1401 "list %#x (%p)", pvo
, ptegidx
,
1402 &pmap_pvo_table
[ptegidx
]);
1404 if (pvo
->pvo_pmap
== pm
&& PVO_VADDR(pvo
) == va
) {
1406 *pteidx_p
= pmap_pvo_pte_index(pvo
, ptegidx
);
1410 if ((pm
== pmap_kernel()) && (va
< SEGMENT_LENGTH
))
1411 panic("%s: returning NULL for %s pmap, va: %#" _PRIxva
"\n",
1412 __func__
, (pm
== pmap_kernel() ? "kernel" : "user"), va
);
1416 #if defined(DEBUG) || defined(PMAPCHECK)
1418 pmap_pvo_check(const struct pvo_entry
*pvo
)
1420 struct pvo_head
*pvo_head
;
1421 struct pvo_entry
*pvo0
;
1422 volatile struct pte
*pt
;
1427 if ((uintptr_t)(pvo
+1) >= SEGMENT_LENGTH
)
1428 panic("pmap_pvo_check: pvo %p: invalid address", pvo
);
1430 if ((uintptr_t)(pvo
->pvo_pmap
+1) >= SEGMENT_LENGTH
) {
1431 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1432 pvo
, pvo
->pvo_pmap
);
1436 if ((uintptr_t)TAILQ_NEXT(pvo
, pvo_olink
) >= SEGMENT_LENGTH
||
1437 (((uintptr_t)TAILQ_NEXT(pvo
, pvo_olink
)) & 0x1f) != 0) {
1438 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1439 pvo
, TAILQ_NEXT(pvo
, pvo_olink
));
1443 if ((uintptr_t)LIST_NEXT(pvo
, pvo_vlink
) >= SEGMENT_LENGTH
||
1444 (((uintptr_t)LIST_NEXT(pvo
, pvo_vlink
)) & 0x1f) != 0) {
1445 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1446 pvo
, LIST_NEXT(pvo
, pvo_vlink
));
1450 if (PVO_MANAGED_P(pvo
)) {
1451 pvo_head
= pa_to_pvoh(pvo
->pvo_pte
.pte_lo
& PTE_RPGN
, NULL
);
1453 if (pvo
->pvo_vaddr
< VM_MIN_KERNEL_ADDRESS
) {
1454 printf("pmap_pvo_check: pvo %p: non kernel address "
1455 "on kernel unmanaged list\n", pvo
);
1458 pvo_head
= &pmap_pvo_kunmanaged
;
1460 LIST_FOREACH(pvo0
, pvo_head
, pvo_vlink
) {
1465 printf("pmap_pvo_check: pvo %p: not present "
1466 "on its vlist head %p\n", pvo
, pvo_head
);
1469 if (pvo
!= pmap_pvo_find_va(pvo
->pvo_pmap
, pvo
->pvo_vaddr
, NULL
)) {
1470 printf("pmap_pvo_check: pvo %p: not present "
1471 "on its olist head\n", pvo
);
1474 pt
= pmap_pvo_to_pte(pvo
, -1);
1476 if (pvo
->pvo_pte
.pte_hi
& PTE_VALID
) {
1477 printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1482 if ((uintptr_t) pt
< (uintptr_t) &pmap_pteg_table
[0] ||
1484 (uintptr_t) &pmap_pteg_table
[pmap_pteg_cnt
]) {
1485 printf("pmap_pvo_check: pvo %p: pte %p not in "
1486 "pteg table\n", pvo
, pt
);
1489 if (((((uintptr_t) pt
) >> 3) & 7) != PVO_PTEGIDX_GET(pvo
)) {
1490 printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1494 if (pvo
->pvo_pte
.pte_hi
!= pt
->pte_hi
) {
1495 printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1496 "%#" _PRIxpte
"/%#" _PRIxpte
"\n", pvo
,
1497 pvo
->pvo_pte
.pte_hi
,
1501 if (((pvo
->pvo_pte
.pte_lo
^ pt
->pte_lo
) &
1502 (PTE_PP
|PTE_WIMG
|PTE_RPGN
)) != 0) {
1503 printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1504 "%#" _PRIxpte
"/%#" _PRIxpte
"\n", pvo
,
1505 (pvo
->pvo_pte
.pte_lo
& (PTE_PP
|PTE_WIMG
|PTE_RPGN
)),
1506 (pt
->pte_lo
& (PTE_PP
|PTE_WIMG
|PTE_RPGN
)));
1509 if ((pmap_pte_to_va(pt
) ^ PVO_VADDR(pvo
)) & 0x0fffffff) {
1510 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva
""
1511 " doesn't not match PVO's VA %#" _PRIxva
"\n",
1512 pvo
, pt
, pmap_pte_to_va(pt
), PVO_VADDR(pvo
));
1519 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo
,
1524 #endif /* DEBUG || PMAPCHECK */
1527 * Search the PVO table looking for a non-wired entry.
1528 * If we find one, remove it and return it.
1532 pmap_pvo_reclaim(struct pmap
*pm
)
1534 struct pvo_tqhead
*pvoh
;
1535 struct pvo_entry
*pvo
;
1536 uint32_t idx
, endidx
;
1538 endidx
= pmap_pvo_reclaim_nextidx
;
1539 for (idx
= (endidx
+ 1) & pmap_pteg_mask
; idx
!= endidx
;
1540 idx
= (idx
+ 1) & pmap_pteg_mask
) {
1541 pvoh
= &pmap_pvo_table
[idx
];
1542 TAILQ_FOREACH(pvo
, pvoh
, pvo_olink
) {
1543 if (!PVO_WIRED_P(pvo
)) {
1544 pmap_pvo_remove(pvo
, -1, NULL
);
1545 pmap_pvo_reclaim_nextidx
= idx
;
1546 PMAPCOUNT(pvos_reclaimed
);
1555 * This returns whether this is the first mapping of a page.
1558 pmap_pvo_enter(pmap_t pm
, struct pool
*pl
, struct pvo_head
*pvo_head
,
1559 vaddr_t va
, paddr_t pa
, register_t pte_lo
, int flags
)
1561 struct pvo_entry
*pvo
;
1562 struct pvo_tqhead
*pvoh
;
1566 int poolflags
= PR_NOWAIT
;
1569 * Compute the PTE Group index.
1572 ptegidx
= va_to_pteg(pm
, va
);
1574 msr
= pmap_interrupts_off();
1576 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1577 if (pmap_pvo_remove_depth
> 0)
1578 panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1579 if (++pmap_pvo_enter_depth
> 1)
1580 panic("pmap_pvo_enter: called recursively!");
1584 * Remove any existing mapping for this page. Reuse the
1585 * pvo entry if there a mapping.
1587 TAILQ_FOREACH(pvo
, &pmap_pvo_table
[ptegidx
], pvo_olink
) {
1588 if (pvo
->pvo_pmap
== pm
&& PVO_VADDR(pvo
) == va
) {
1590 if ((pmapdebug
& PMAPDEBUG_PVOENTER
) &&
1591 ((pvo
->pvo_pte
.pte_lo
^ (pa
|pte_lo
)) &
1592 ~(PTE_REF
|PTE_CHG
)) == 0 &&
1593 va
< VM_MIN_KERNEL_ADDRESS
) {
1594 printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte
"/%#" _PRIxpa
"\n",
1595 pvo
, pvo
->pvo_pte
.pte_lo
, pte_lo
|pa
);
1596 printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte
" sr=%#" _PRIsr
"\n",
1597 pvo
->pvo_pte
.pte_hi
,
1598 pm
->pm_sr
[va
>> ADDR_SR_SHFT
]);
1599 pmap_pte_print(pmap_pvo_to_pte(pvo
, -1));
1605 PMAPCOUNT(mappings_replaced
);
1606 pmap_pvo_remove(pvo
, -1, NULL
);
1612 * If we aren't overwriting an mapping, try to allocate
1614 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1615 --pmap_pvo_enter_depth
;
1617 pmap_interrupts_restore(msr
);
1621 pvo
= pool_get(pl
, poolflags
);
1625 * Exercise pmap_pvo_reclaim() a little.
1627 if (pvo
&& (flags
& PMAP_CANFAIL
) != 0 &&
1628 pmap_pvo_reclaim_debugctr
++ > 0x1000 &&
1629 (pmap_pvo_reclaim_debugctr
& 0xff) == 0) {
1635 msr
= pmap_interrupts_off();
1636 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1637 ++pmap_pvo_enter_depth
;
1640 pvo
= pmap_pvo_reclaim(pm
);
1642 if ((flags
& PMAP_CANFAIL
) == 0)
1643 panic("pmap_pvo_enter: failed");
1644 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1645 pmap_pvo_enter_depth
--;
1647 PMAPCOUNT(pvos_failed
);
1648 pmap_interrupts_restore(msr
);
1653 pvo
->pvo_vaddr
= va
;
1655 pvo
->pvo_vaddr
&= ~ADDR_POFF
;
1656 if (flags
& VM_PROT_EXECUTE
) {
1657 PMAPCOUNT(exec_mappings
);
1660 if (flags
& PMAP_WIRED
)
1661 pvo
->pvo_vaddr
|= PVO_WIRED
;
1662 if (pvo_head
!= &pmap_pvo_kunmanaged
) {
1663 pvo
->pvo_vaddr
|= PVO_MANAGED
;
1664 PMAPCOUNT(mappings
);
1666 PMAPCOUNT(kernel_mappings
);
1668 pmap_pte_create(&pvo
->pvo_pte
, pm
, va
, pa
| pte_lo
);
1670 LIST_INSERT_HEAD(pvo_head
, pvo
, pvo_vlink
);
1671 if (PVO_WIRED_P(pvo
))
1672 pvo
->pvo_pmap
->pm_stats
.wired_count
++;
1673 pvo
->pvo_pmap
->pm_stats
.resident_count
++;
1675 /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
1677 ("pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva
" pa %#" _PRIxpa
"\n",
1682 * We hope this succeeds but it isn't required.
1684 pvoh
= &pmap_pvo_table
[ptegidx
];
1685 i
= pmap_pte_insert(ptegidx
, &pvo
->pvo_pte
);
1687 PVO_PTEGIDX_SET(pvo
, i
);
1688 PVO_WHERE(pvo
, ENTER_INSERT
);
1689 PMAPCOUNT2(((pvo
->pvo_pte
.pte_hi
& PTE_HID
)
1690 ? pmap_evcnt_ptes_secondary
: pmap_evcnt_ptes_primary
)[i
]);
1691 TAILQ_INSERT_TAIL(pvoh
, pvo
, pvo_olink
);
1695 * Since we didn't have room for this entry (which makes it
1696 * and evicted entry), place it at the head of the list.
1698 TAILQ_INSERT_HEAD(pvoh
, pvo
, pvo_olink
);
1699 PMAPCOUNT(ptes_evicted
);
1702 * If this is a kernel page, make sure it's active.
1704 if (pm
== pmap_kernel()) {
1705 i
= pmap_pte_spill(pm
, va
, false);
1709 PMAP_PVO_CHECK(pvo
); /* sanity check */
1710 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1711 pmap_pvo_enter_depth
--;
1713 pmap_interrupts_restore(msr
);
1718 pmap_pvo_remove(struct pvo_entry
*pvo
, int pteidx
, struct pvo_head
*pvol
)
1720 volatile struct pte
*pt
;
1723 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1724 if (++pmap_pvo_remove_depth
> 1)
1725 panic("pmap_pvo_remove: called recursively!");
1729 * If we haven't been supplied the ptegidx, calculate it.
1732 ptegidx
= va_to_pteg(pvo
->pvo_pmap
, pvo
->pvo_vaddr
);
1733 pteidx
= pmap_pvo_pte_index(pvo
, ptegidx
);
1735 ptegidx
= pteidx
>> 3;
1736 if (pvo
->pvo_pte
.pte_hi
& PTE_HID
)
1737 ptegidx
^= pmap_pteg_mask
;
1739 PMAP_PVO_CHECK(pvo
); /* sanity check */
1742 * If there is an active pte entry, we need to deactivate it
1743 * (and save the ref & chg bits).
1745 pt
= pmap_pvo_to_pte(pvo
, pteidx
);
1747 pmap_pte_unset(pt
, &pvo
->pvo_pte
, pvo
->pvo_vaddr
);
1748 PVO_WHERE(pvo
, REMOVE
);
1749 PVO_PTEGIDX_CLR(pvo
);
1750 PMAPCOUNT(ptes_removed
);
1752 KASSERT(pvo
->pvo_pmap
->pm_evictions
> 0);
1753 pvo
->pvo_pmap
->pm_evictions
--;
1757 * Account for executable mappings.
1759 if (PVO_EXECUTABLE_P(pvo
))
1760 pvo_clear_exec(pvo
);
1763 * Update our statistics.
1765 pvo
->pvo_pmap
->pm_stats
.resident_count
--;
1766 if (PVO_WIRED_P(pvo
))
1767 pvo
->pvo_pmap
->pm_stats
.wired_count
--;
1770 * Save the REF/CHG bits into their cache if the page is managed.
1772 if (PVO_MANAGED_P(pvo
)) {
1773 register_t ptelo
= pvo
->pvo_pte
.pte_lo
;
1774 struct vm_page
*pg
= PHYS_TO_VM_PAGE(ptelo
& PTE_RPGN
);
1778 * If this page was changed and it is mapped exec,
1781 if ((ptelo
& PTE_CHG
) &&
1782 (pmap_attr_fetch(pg
) & PTE_EXEC
)) {
1783 struct pvo_head
*pvoh
= vm_page_to_pvoh(pg
);
1784 if (LIST_EMPTY(pvoh
)) {
1785 DPRINTFN(EXEC
, ("[pmap_pvo_remove: "
1786 "%#" _PRIxpa
": clear-exec]\n",
1787 VM_PAGE_TO_PHYS(pg
)));
1788 pmap_attr_clear(pg
, PTE_EXEC
);
1789 PMAPCOUNT(exec_uncached_pvo_remove
);
1791 DPRINTFN(EXEC
, ("[pmap_pvo_remove: "
1792 "%#" _PRIxpa
": syncicache]\n",
1793 VM_PAGE_TO_PHYS(pg
)));
1794 pmap_syncicache(VM_PAGE_TO_PHYS(pg
),
1796 PMAPCOUNT(exec_synced_pvo_remove
);
1800 pmap_attr_save(pg
, ptelo
& (PTE_REF
|PTE_CHG
));
1802 PMAPCOUNT(unmappings
);
1804 PMAPCOUNT(kernel_unmappings
);
1808 * Remove the PVO from its lists and return it to the pool.
1810 LIST_REMOVE(pvo
, pvo_vlink
);
1811 TAILQ_REMOVE(&pmap_pvo_table
[ptegidx
], pvo
, pvo_olink
);
1813 LIST_INSERT_HEAD(pvol
, pvo
, pvo_vlink
);
1815 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1816 pmap_pvo_remove_depth
--;
1821 pmap_pvo_free(struct pvo_entry
*pvo
)
1824 pool_put(PVO_MANAGED_P(pvo
) ? &pmap_mpvo_pool
: &pmap_upvo_pool
, pvo
);
1828 pmap_pvo_free_list(struct pvo_head
*pvol
)
1830 struct pvo_entry
*pvo
, *npvo
;
1832 for (pvo
= LIST_FIRST(pvol
); pvo
!= NULL
; pvo
= npvo
) {
1833 npvo
= LIST_NEXT(pvo
, pvo_vlink
);
1834 LIST_REMOVE(pvo
, pvo_vlink
);
1840 * Mark a mapping as executable.
1841 * If this is the first executable mapping in the segment,
1842 * clear the noexec flag.
1845 pvo_set_exec(struct pvo_entry
*pvo
)
1847 struct pmap
*pm
= pvo
->pvo_pmap
;
1849 if (pm
== pmap_kernel() || PVO_EXECUTABLE_P(pvo
)) {
1852 pvo
->pvo_vaddr
|= PVO_EXECUTABLE
;
1853 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1855 int sr
= PVO_VADDR(pvo
) >> ADDR_SR_SHFT
;
1856 if (pm
->pm_exec
[sr
]++ == 0) {
1857 pm
->pm_sr
[sr
] &= ~SR_NOEXEC
;
1864 * Mark a mapping as non-executable.
1865 * If this was the last executable mapping in the segment,
1866 * set the noexec flag.
1869 pvo_clear_exec(struct pvo_entry
*pvo
)
1871 struct pmap
*pm
= pvo
->pvo_pmap
;
1873 if (pm
== pmap_kernel() || !PVO_EXECUTABLE_P(pvo
)) {
1876 pvo
->pvo_vaddr
&= ~PVO_EXECUTABLE
;
1877 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1879 int sr
= PVO_VADDR(pvo
) >> ADDR_SR_SHFT
;
1880 if (--pm
->pm_exec
[sr
] == 0) {
1881 pm
->pm_sr
[sr
] |= SR_NOEXEC
;
1888 * Insert physical page at pa into the given pmap at virtual address va.
1891 pmap_enter(pmap_t pm
, vaddr_t va
, paddr_t pa
, vm_prot_t prot
, u_int flags
)
1893 struct mem_region
*mp
;
1894 struct pvo_head
*pvo_head
;
1904 if (__predict_false(!pmap_initialized
)) {
1905 pvo_head
= &pmap_pvo_kunmanaged
;
1906 pl
= &pmap_upvo_pool
;
1909 was_exec
= PTE_EXEC
;
1911 pvo_head
= pa_to_pvoh(pa
, &pg
);
1912 pl
= &pmap_mpvo_pool
;
1913 pvo_flags
= PVO_MANAGED
;
1917 ("pmap_enter(%p, %#" _PRIxva
", %#" _PRIxpa
", 0x%x, 0x%x):",
1918 pm
, va
, pa
, prot
, flags
));
1921 * If this is a managed page, and it's the first reference to the
1922 * page clear the execness of the page. Otherwise fetch the execness.
1925 was_exec
= pmap_attr_fetch(pg
) & PTE_EXEC
;
1927 DPRINTFN(ENTER
, (" was_exec=%d", was_exec
));
1930 * Assume the page is cache inhibited and access is guarded unless
1931 * it's in our available memory array. If it is in the memory array,
1932 * asssume it's in memory coherent memory.
1935 if ((flags
& PMAP_NC
) == 0) {
1936 for (mp
= mem
; mp
->size
; mp
++) {
1937 if (pa
>= mp
->start
&& pa
< mp
->start
+ mp
->size
) {
1944 if (prot
& VM_PROT_WRITE
)
1950 * If this was in response to a fault, "pre-fault" the PTE's
1951 * changed/referenced bit appropriately.
1953 if (flags
& VM_PROT_WRITE
)
1955 if (flags
& VM_PROT_ALL
)
1959 * We need to know if this page can be executable
1961 flags
|= (prot
& VM_PROT_EXECUTE
);
1964 * Record mapping for later back-translation and pte spilling.
1965 * This will overwrite any existing mapping.
1967 error
= pmap_pvo_enter(pm
, pl
, pvo_head
, va
, pa
, pte_lo
, flags
);
1970 * Flush the real page from the instruction cache if this page is
1971 * mapped executable and cacheable and has not been flushed since
1972 * the last time it was modified.
1975 (flags
& VM_PROT_EXECUTE
) &&
1976 (pte_lo
& PTE_I
) == 0 &&
1978 DPRINTFN(ENTER
, (" syncicache"));
1979 PMAPCOUNT(exec_synced
);
1980 pmap_syncicache(pa
, PAGE_SIZE
);
1982 pmap_attr_save(pg
, PTE_EXEC
);
1983 PMAPCOUNT(exec_cached
);
1984 #if defined(DEBUG) || defined(PMAPDEBUG)
1985 if (pmapdebug
& PMAPDEBUG_ENTER
)
1986 printf(" marked-as-exec");
1987 else if (pmapdebug
& PMAPDEBUG_EXEC
)
1988 printf("[pmap_enter: %#" _PRIxpa
": marked-as-exec]\n",
1989 VM_PAGE_TO_PHYS(pg
));
1995 DPRINTFN(ENTER
, (": error=%d\n", error
));
2003 pmap_kenter_pa(vaddr_t va
, paddr_t pa
, vm_prot_t prot
, u_int flags
)
2005 struct mem_region
*mp
;
2009 #if defined (PMAP_OEA64_BRIDGE)
2010 if (va
< VM_MIN_KERNEL_ADDRESS
)
2011 panic("pmap_kenter_pa: attempt to enter "
2012 "non-kernel address %#" _PRIxva
"!", va
);
2016 ("pmap_kenter_pa(%#" _PRIxva
",%#" _PRIxpa
",%#x)\n", va
, pa
, prot
));
2021 * Assume the page is cache inhibited and access is guarded unless
2022 * it's in our available memory array. If it is in the memory array,
2023 * asssume it's in memory coherent memory.
2026 if ((prot
& PMAP_NC
) == 0) {
2027 for (mp
= mem
; mp
->size
; mp
++) {
2028 if (pa
>= mp
->start
&& pa
< mp
->start
+ mp
->size
) {
2035 if (prot
& VM_PROT_WRITE
)
2041 * We don't care about REF/CHG on PVOs on the unmanaged list.
2043 error
= pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool
,
2044 &pmap_pvo_kunmanaged
, va
, pa
, pte_lo
, prot
|PMAP_WIRED
);
2047 panic("pmap_kenter_pa: failed to enter va %#" _PRIxva
" pa %#" _PRIxpa
": %d",
2054 pmap_kremove(vaddr_t va
, vsize_t len
)
2056 if (va
< VM_MIN_KERNEL_ADDRESS
)
2057 panic("pmap_kremove: attempt to remove "
2058 "non-kernel address %#" _PRIxva
"!", va
);
2060 DPRINTFN(KREMOVE
,("pmap_kremove(%#" _PRIxva
",%#" _PRIxva
")\n", va
, len
));
2061 pmap_remove(pmap_kernel(), va
, va
+ len
);
2065 * Remove the given range of mapping entries.
2068 pmap_remove(pmap_t pm
, vaddr_t va
, vaddr_t endva
)
2070 struct pvo_head pvol
;
2071 struct pvo_entry
*pvo
;
2077 msr
= pmap_interrupts_off();
2078 for (; va
< endva
; va
+= PAGE_SIZE
) {
2079 pvo
= pmap_pvo_find_va(pm
, va
, &pteidx
);
2081 pmap_pvo_remove(pvo
, pteidx
, &pvol
);
2084 pmap_interrupts_restore(msr
);
2085 pmap_pvo_free_list(&pvol
);
2090 * Get the physical page address for the given pmap/virtual address.
2093 pmap_extract(pmap_t pm
, vaddr_t va
, paddr_t
*pap
)
2095 struct pvo_entry
*pvo
;
2101 * If this is a kernel pmap lookup, also check the battable
2102 * and if we get a hit, translate the VA to a PA using the
2103 * BAT entries. Don't check for VM_MAX_KERNEL_ADDRESS is
2104 * that will wrap back to 0.
2106 if (pm
== pmap_kernel() &&
2107 (va
< VM_MIN_KERNEL_ADDRESS
||
2108 (KERNEL2_SR
< 15 && VM_MAX_KERNEL_ADDRESS
<= va
))) {
2109 KASSERT((va
>> ADDR_SR_SHFT
) != USER_SR
);
2110 #if defined (PMAP_OEA)
2112 if ((MFPVR() >> 16) == MPC601
) {
2113 register_t batu
= battable
[va
>> 23].batu
;
2114 register_t batl
= battable
[va
>> 23].batl
;
2115 register_t sr
= iosrtable
[va
>> ADDR_SR_SHFT
];
2116 if (BAT601_VALID_P(batl
) &&
2117 BAT601_VA_MATCH_P(batu
, batl
, va
)) {
2119 (~(batl
& BAT601_BSM
) << 17) & ~0x1ffffL
;
2121 *pap
= (batl
& mask
) | (va
& ~mask
);
2124 } else if (SR601_VALID_P(sr
) &&
2125 SR601_PA_MATCH_P(sr
, va
)) {
2132 #endif /* PPC_OEA601 */
2134 register_t batu
= battable
[va
>> ADDR_SR_SHFT
].batu
;
2135 if (BAT_VALID_P(batu
,0) && BAT_VA_MATCH_P(batu
,va
)) {
2137 battable
[va
>> ADDR_SR_SHFT
].batl
;
2139 (~(batu
& BAT_BL
) << 15) & ~0x1ffffL
;
2141 *pap
= (batl
& mask
) | (va
& ~mask
);
2147 #elif defined (PMAP_OEA64_BRIDGE)
2148 if (va
>= SEGMENT_LENGTH
)
2149 panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n",
2150 __func__
, (pm
== pmap_kernel() ? "kernel" : "user"), va
);
2157 #elif defined (PMAP_OEA64)
2158 #error PPC_OEA64 not supported
2159 #endif /* PPC_OEA */
2162 msr
= pmap_interrupts_off();
2163 pvo
= pmap_pvo_find_va(pm
, va
& ~ADDR_POFF
, NULL
);
2165 PMAP_PVO_CHECK(pvo
); /* sanity check */
2167 *pap
= (pvo
->pvo_pte
.pte_lo
& PTE_RPGN
)
2170 pmap_interrupts_restore(msr
);
2176 * Lower the protection on the specified range of this pmap.
2179 pmap_protect(pmap_t pm
, vaddr_t va
, vaddr_t endva
, vm_prot_t prot
)
2181 struct pvo_entry
*pvo
;
2182 volatile struct pte
*pt
;
2187 * Since this routine only downgrades protection, we should
2188 * always be called with at least one bit not set.
2190 KASSERT(prot
!= VM_PROT_ALL
);
2193 * If there is no protection, this is equivalent to
2194 * remove the pmap from the pmap.
2196 if ((prot
& VM_PROT_READ
) == 0) {
2197 pmap_remove(pm
, va
, endva
);
2203 msr
= pmap_interrupts_off();
2204 for (; va
< endva
; va
+= PAGE_SIZE
) {
2205 pvo
= pmap_pvo_find_va(pm
, va
, &pteidx
);
2208 PMAP_PVO_CHECK(pvo
); /* sanity check */
2211 * Revoke executable if asked to do so.
2213 if ((prot
& VM_PROT_EXECUTE
) == 0)
2214 pvo_clear_exec(pvo
);
2218 * If the page is already read-only, no change
2221 if ((pvo
->pvo_pte
.pte_lo
& PTE_PP
) == PTE_BR
)
2225 * Grab the PTE pointer before we diddle with
2226 * the cached PTE copy.
2228 pt
= pmap_pvo_to_pte(pvo
, pteidx
);
2230 * Change the protection of the page.
2232 pvo
->pvo_pte
.pte_lo
&= ~PTE_PP
;
2233 pvo
->pvo_pte
.pte_lo
|= PTE_BR
;
2236 * If the PVO is in the page table, update
2240 pmap_pte_change(pt
, &pvo
->pvo_pte
, pvo
->pvo_vaddr
);
2241 PVO_WHERE(pvo
, PMAP_PROTECT
);
2242 PMAPCOUNT(ptes_changed
);
2245 PMAP_PVO_CHECK(pvo
); /* sanity check */
2247 pmap_interrupts_restore(msr
);
void
pmap_unwire(pmap_t pm, vaddr_t va)
{
        struct pvo_entry *pvo;
        register_t msr;

        msr = pmap_interrupts_off();
        pvo = pmap_pvo_find_va(pm, va, NULL);
        if (pvo != NULL) {
                if (PVO_WIRED_P(pvo)) {
                        pvo->pvo_vaddr &= ~PVO_WIRED;
                        pm->pm_stats.wired_count--;
                }
                PMAP_PVO_CHECK(pvo);            /* sanity check */
        }
        pmap_interrupts_restore(msr);
}

/*
 * Lower the protection on the specified physical page.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
        struct pvo_head *pvo_head, pvol;
        struct pvo_entry *pvo, *next_pvo;
        volatile struct pte *pt;
        register_t msr;

        KASSERT(prot != VM_PROT_ALL);
        LIST_INIT(&pvol);
        msr = pmap_interrupts_off();

        /*
         * When UVM reuses a page, it does a pmap_page_protect with
         * VM_PROT_NONE.  At that point, we can clear the exec flag
         * since we know the page will have different contents.
         */
        if ((prot & VM_PROT_READ) == 0) {
                DPRINTFN(EXEC, ("[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
                    VM_PAGE_TO_PHYS(pg)));
                if (pmap_attr_fetch(pg) & PTE_EXEC) {
                        PMAPCOUNT(exec_uncached_page_protect);
                        pmap_attr_clear(pg, PTE_EXEC);
                }
        }

        pvo_head = vm_page_to_pvoh(pg);
        for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
                next_pvo = LIST_NEXT(pvo, pvo_vlink);
                PMAP_PVO_CHECK(pvo);            /* sanity check */

                /*
                 * Downgrading to no mapping at all, we just remove the entry.
                 */
                if ((prot & VM_PROT_READ) == 0) {
                        pmap_pvo_remove(pvo, -1, &pvol);
                        continue;
                }

                /*
                 * If EXEC permission is being revoked, just clear the
                 * flag in the PVO.
                 */
                if ((prot & VM_PROT_EXECUTE) == 0)
                        pvo_clear_exec(pvo);

                /*
                 * If this entry is already RO, don't diddle with the
                 * page table.
                 */
                if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
                        PMAP_PVO_CHECK(pvo);
                        continue;
                }

                /*
                 * Grab the PTE before we diddle the bits so
                 * pvo_to_pte can verify the pte contents are as
                 * expected.
                 */
                pt = pmap_pvo_to_pte(pvo, -1);
                pvo->pvo_pte.pte_lo &= ~PTE_PP;
                pvo->pvo_pte.pte_lo |= PTE_BR;
                if (pt != NULL) {
                        pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
                        PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
                        PMAPCOUNT(ptes_changed);
                }
                PMAP_PVO_CHECK(pvo);            /* sanity check */
        }
        pmap_interrupts_restore(msr);
        pmap_pvo_free_list(&pvol);
}

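/*
 * Note on the pvol list above: pmap_pvo_remove() only unlinks each entry
 * and collects it on the local pvol list; pmap_pvo_free_list() then does
 * the actual frees after pmap_interrupts_restore(), presumably so the pool
 * allocator is never entered with interrupts blocked.
 */
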
/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct lwp *l)
{
        struct pcb *pcb = lwp_getpcb(l);
        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

        DPRINTFN(ACTIVATE,
            ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));

        /*
         * XXX Normally performed in cpu_fork().
         */
        pcb->pcb_pm = pmap;

        /*
         * In theory, the SR registers need only be valid on return
         * to user space, so we could wait and load them there.
         */

        /* Store pointer to new current pmap. */
        curpm = pmap;
}

/*
 * Deactivate the specified process's address space.
 */
void
pmap_deactivate(struct lwp *l)
{
}

bool
pmap_query_bit(struct vm_page *pg, int ptebit)
{
        struct pvo_entry *pvo;
        volatile struct pte *pt;
        register_t msr;

        if (pmap_attr_fetch(pg) & ptebit) {
                return true;
        }

        msr = pmap_interrupts_off();
        LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
                PMAP_PVO_CHECK(pvo);            /* sanity check */
                /*
                 * See if we saved the bit off.  If so, cache it and return
                 * success.
                 */
                if (pvo->pvo_pte.pte_lo & ptebit) {
                        pmap_attr_save(pg, ptebit);
                        PMAP_PVO_CHECK(pvo);    /* sanity check */
                        pmap_interrupts_restore(msr);
                        return true;
                }
        }
        /*
         * No luck, now go thru the hard part of looking at the PTEs
         * themselves.  Sync so any pending REF/CHG bits are flushed
         * to the PTEs first.
         */
        LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
                PMAP_PVO_CHECK(pvo);            /* sanity check */
                /*
                 * See if this pvo has a valid PTE.  If so, fetch the
                 * REF/CHG bits from the valid PTE.  If the appropriate
                 * ptebit is set, cache it and return success.
                 */
                pt = pmap_pvo_to_pte(pvo, -1);
                if (pt != NULL) {
                        pmap_pte_synch(pt, &pvo->pvo_pte);
                        if (pvo->pvo_pte.pte_lo & ptebit) {
                                pmap_attr_save(pg, ptebit);
                                PMAP_PVO_CHECK(pvo);    /* sanity check */
                                pmap_interrupts_restore(msr);
                                return true;
                        }
                }
        }
        pmap_interrupts_restore(msr);
        return false;
}

bool
pmap_clear_bit(struct vm_page *pg, int ptebit)
{
        struct pvo_head *pvoh = vm_page_to_pvoh(pg);
        struct pvo_entry *pvo;
        volatile struct pte *pt;
        register_t msr;
        int rv = 0;

        msr = pmap_interrupts_off();

        /*
         * Fetch the cached value.
         */
        rv |= pmap_attr_fetch(pg);

        /*
         * Clear the cached value.
         */
        pmap_attr_clear(pg, ptebit);

        /*
         * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
         * can reset the right ones).  Note that since the pvo entries and
         * list heads are accessed via BAT0 and are never placed in the
         * page table, we don't have to worry about further accesses setting
         * the REF/CHG bits.
         */
        SYNC();

        /*
         * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
         * valid PTE, also clear the ptebit from the valid PTE.
         */
        LIST_FOREACH(pvo, pvoh, pvo_vlink) {
                PMAP_PVO_CHECK(pvo);            /* sanity check */
                pt = pmap_pvo_to_pte(pvo, -1);
                if (pt != NULL) {
                        /*
                         * Only sync the PTE if the bit we are looking
                         * for is not already set.
                         */
                        if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
                                pmap_pte_synch(pt, &pvo->pvo_pte);
                        /*
                         * If the bit we are looking for was already set,
                         * clear that bit in the pte.
                         */
                        if (pvo->pvo_pte.pte_lo & ptebit)
                                pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
                }
                rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
                pvo->pvo_pte.pte_lo &= ~ptebit;
                PMAP_PVO_CHECK(pvo);            /* sanity check */
        }
        pmap_interrupts_restore(msr);

        /*
         * If we are clearing the modify bit and this page was marked EXEC
         * and the user of the page thinks the page was modified, then we
         * need to clean it from the icache if it's mapped or clear the EXEC
         * bit if it's not mapped.  The page itself might not have the CHG
         * bit set if the modification was done via DMA to the page.
         */
        if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
                if (LIST_EMPTY(pvoh)) {
                        DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
                            VM_PAGE_TO_PHYS(pg)));
                        pmap_attr_clear(pg, PTE_EXEC);
                        PMAPCOUNT(exec_uncached_clear_modify);
                } else {
                        DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
                            VM_PAGE_TO_PHYS(pg)));
                        pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
                        PMAPCOUNT(exec_synced_clear_modify);
                }
        }
        return (rv & ptebit) != 0;
}

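/*
 * For reference, the MI-visible modified/referenced hooks are normally thin
 * wrappers around the two routines above; the exact macros live in the
 * machine-dependent pmap.h, but they look roughly like this (sketch only):
 *
 *	#define	pmap_is_modified(pg)		pmap_query_bit((pg), PTE_CHG)
 *	#define	pmap_clear_modify(pg)		pmap_clear_bit((pg), PTE_CHG)
 *	#define	pmap_is_referenced(pg)		pmap_query_bit((pg), PTE_REF)
 *	#define	pmap_clear_reference(pg)	pmap_clear_bit((pg), PTE_REF)
 */
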
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
        struct pvo_entry *pvo;
        size_t offset = va & ADDR_POFF;

        while (len > 0) {
                size_t seglen = PAGE_SIZE - offset;
                if (seglen > len)
                        seglen = len;
                pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
                if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
                        pmap_syncicache(
                            (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
                        PMAP_PVO_CHECK(pvo);
                }
                va += seglen;
                len -= seglen;
                offset = 0;
        }
}

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void
pmap_pte_print(volatile struct pte *pt)
{
        printf("PTE %p: ", pt);

#if defined(PMAP_OEA)
        printf("%#" _PRIxpte ": [", pt->pte_hi);
#else
        printf("%#" _PRIxpte ": [", pt->pte_hi);
#endif /* PMAP_OEA */

        printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
        printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');

        printf("%#" _PRIxpte " %#" _PRIxpte "",
            (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
            pt->pte_hi & PTE_API);
#if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
        printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
#else
        printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
#endif /* PMAP_OEA */

#if defined (PMAP_OEA)
        printf(" %#" _PRIxpte ": [", pt->pte_lo);
        printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
#else
        printf(" %#" _PRIxpte ": [", pt->pte_lo);
        printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
#endif
        printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
        printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
        printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
        printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
        printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
        printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
        switch (pt->pte_lo & PTE_PP) {
        case PTE_BR: printf("br]\n"); break;
        case PTE_BW: printf("bw]\n"); break;
        case PTE_SO: printf("so]\n"); break;
        case PTE_SW: printf("sw]\n"); break;
        }
}

void
pmap_pteg_check(void)
{
        volatile struct pte *pt;
        int i;
        int ptegidx;
        u_int p_valid = 0;
        u_int s_valid = 0;
        u_int invalid = 0;

        for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
                for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
                        if (pt->pte_hi & PTE_VALID) {
                                if (pt->pte_hi & PTE_HID)
                                        s_valid++;
                                else
                                        p_valid++;
                        } else
                                invalid++;
                }
        }
        printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
            p_valid, p_valid, s_valid, s_valid,
            invalid, invalid);
}

void
pmap_print_mmuregs(void)
{
        int i;
        u_int cpuvers;
        vaddr_t addr;
        register_t soft_sr[16];
#if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
        struct bat soft_ibat[4];
        struct bat soft_dbat[4];
#endif
        paddr_t sdr1;

        cpuvers = MFPVR() >> 16;
        __asm volatile ("mfsdr1 %0" : "=r"(sdr1));
        addr = 0;
        for (i = 0; i < 16; i++) {
                soft_sr[i] = MFSRIN(addr);
                addr += (1 << ADDR_SR_SHFT);
        }

#if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
        /* read iBAT (601: uBAT) registers */
        __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
        __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
        __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
        __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
        __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
        __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
        __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
        __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));

        if (cpuvers != MPC601) {
                /* read dBAT registers */
                __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
                __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
                __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
                __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
                __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
                __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
                __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
                __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
        }
#endif

        printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
        printf("SR[]:\t");
        for (i = 0; i < 4; i++)
                printf("0x%08lx, ", soft_sr[i]);
        printf("\n\t");
        for ( ; i < 8; i++)
                printf("0x%08lx, ", soft_sr[i]);
        printf("\n\t");
        for ( ; i < 12; i++)
                printf("0x%08lx, ", soft_sr[i]);
        printf("\n\t");
        for ( ; i < 16; i++)
                printf("0x%08lx, ", soft_sr[i]);
        printf("\n");

#if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE)
        printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
        for (i = 0; i < 4; i++) {
                printf("0x%08lx 0x%08lx, ",
                    soft_ibat[i].batu, soft_ibat[i].batl);
        }
        if (cpuvers != MPC601) {
                printf("\ndBAT[]:\t");
                for (i = 0; i < 4; i++) {
                        printf("0x%08lx 0x%08lx, ",
                            soft_dbat[i].batu, soft_dbat[i].batl);
                }
        }
        printf("\n");
#endif /* PMAP_OEA... */
}

void
pmap_print_pte(pmap_t pm, vaddr_t va)
{
        struct pvo_entry *pvo;
        volatile struct pte *pt;
        int pteidx;

        pvo = pmap_pvo_find_va(pm, va, &pteidx);
        if (pvo != NULL) {
                pt = pmap_pvo_to_pte(pvo, pteidx);
                if (pt != NULL) {
                        printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
                            va, pt,
                            pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
                            pt->pte_hi, pt->pte_lo);
                } else {
                        printf("No valid PTE found\n");
                }
        } else {
                printf("Address not in pmap\n");
        }
}

void
pmap_pteg_dist(void)
{
        struct pvo_entry *pvo;
        int ptegidx;
        unsigned int depth;
        unsigned int max_depth = 0;
        unsigned int depths[64];

        memset(depths, 0, sizeof(depths));
        for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
                depth = 0;
                TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
                        depth++;
                }
                if (depth > max_depth)
                        max_depth = depth;
                if (depth > 63)
                        depth = 63;
                depths[depth]++;
        }

        for (depth = 0; depth < 64; depth++) {
                printf(" [%2d]: %8u", depth, depths[depth]);
                if ((depth & 3) == 3)
                        printf("\n");
                if (depth == max_depth)
                        break;
        }
        if ((depth & 3) != 3)
                printf("\n");
        printf("Max depth found was %d\n", max_depth);
}

#if defined(PMAPCHECK) || defined(DEBUG)
void
pmap_pvo_verify(void)
{
        int ptegidx;

        for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
                struct pvo_entry *pvo;
                TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
                        if ((uintptr_t) pvo >= SEGMENT_LENGTH)
                                panic("pmap_pvo_verify: invalid pvo %p "
                                    "on list %#x", pvo, ptegidx);
                        pmap_pvo_check(pvo);
                }
        }
}
#endif /* PMAPCHECK */

void *
pmap_pool_ualloc(struct pool *pp, int flags)
{
        struct pvo_page *pvop;

        if (uvm.page_init_done != true) {
                return (void *) uvm_pageboot_alloc(PAGE_SIZE);
        }

        pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
        if (pvop != NULL) {
                pmap_upvop_free--;
                SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
                return pvop;
        }
        return pmap_pool_malloc(pp, flags);
}

*pp
, int flags
)
2825 struct pvo_page
*pvop
;
2829 pvop
= SIMPLEQ_FIRST(&pmap_mpvop_head
);
2832 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head
, pvop_link
);
2838 pg
= uvm_pagealloc_strat(NULL
, 0, NULL
, UVM_PGA_USERESERVE
,
2839 UVM_PGA_STRAT_ONLY
, VM_FREELIST_FIRST256
);
2840 if (__predict_false(pg
== NULL
)) {
2841 if (flags
& PR_WAITOK
) {
2848 KDASSERT(VM_PAGE_TO_PHYS(pg
) == (uintptr_t)VM_PAGE_TO_PHYS(pg
));
2849 return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg
);
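/*
 * The UVM_PGA_STRAT_ONLY/VM_FREELIST_FIRST256 pairing above is deliberate:
 * pool pages for pvo entries are handed out by their *physical* address and
 * are later touched through the low 256MB mapping (see the BAT0 remark in
 * pmap_clear_bit), so they need to come from the low freelist and nowhere
 * else.
 */
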
void
pmap_pool_ufree(struct pool *pp, void *va)
{
        struct pvo_page *pvop;

#if 0
        if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
                pmap_pool_mfree(va, size, tag);
                return;
        }
#endif
        pvop = va;
        SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
        pmap_upvop_free++;
        if (pmap_upvop_free > pmap_upvop_maxfree)
                pmap_upvop_maxfree = pmap_upvop_free;
}

void
pmap_pool_mfree(struct pool *pp, void *va)
{
        struct pvo_page *pvop;

        pvop = va;
        SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
        pmap_mpvop_free++;
        if (pmap_mpvop_free > pmap_mpvop_maxfree)
                pmap_mpvop_maxfree = pmap_mpvop_free;
#if 0
        uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
#endif
}

/*
 * This routine is used during bootstrap to steal to-be-managed memory (which
 * will then be unmanaged).  We use it to grab from the first 256MB for our
 * pmap needs and above 256MB for other stuff.
 */
vaddr_t
pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
{
        vsize_t size;
        vaddr_t va;
        paddr_t pa = 0;
        int npgs, bank;
        struct vm_physseg *ps;

        if (uvm.page_init_done == true)
                panic("pmap_steal_memory: called _after_ bootstrap");

        *vstartp = VM_MIN_KERNEL_ADDRESS;
        *vendp = VM_MAX_KERNEL_ADDRESS;

        size = round_page(vsize);
        npgs = atop(size);

        /*
         * PA 0 will never be among those given to UVM so we can use it
         * to indicate we couldn't steal any memory.
         */
        for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
                if (ps->free_list == VM_FREELIST_FIRST256 &&
                    ps->avail_end - ps->avail_start >= npgs) {
                        pa = ptoa(ps->avail_start);
                        break;
                }
        }

        if (pa == 0)
                panic("pmap_steal_memory: no appropriate memory to steal!");

        ps->avail_start += npgs;

        /*
         * If we've used up all the pages in the segment, remove it and
         * compact the list.
         */
        if (ps->avail_start == ps->end) {
                /*
                 * If this was the last one, then a very bad thing has occurred
                 */
                if (--vm_nphysseg == 0)
                        panic("pmap_steal_memory: out of memory!");

                printf("pmap_steal_memory: consumed bank %d\n", bank);
                for (; bank < vm_nphysseg; bank++, ps++) {
                        ps[0] = ps[1];
                }
        }

        va = (vaddr_t) pa;
        memset((void *) va, 0, size);
        pmap_pages_stolen += npgs;
        if (pmapdebug && npgs > 1) {
                u_int cnt = 0;
                for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
                        cnt += ps->avail_end - ps->avail_start;
                printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
                    npgs, pmap_pages_stolen, cnt);
        }

        return va;
}

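/*
 * Size math, for the record: the request is rounded up to whole pages before
 * any segment is consumed, e.g. a 3000 byte request becomes one 4096 byte
 * page (npgs = 1) on a 4KB-page configuration, and avail_start of the chosen
 * low-memory segment simply advances by npgs.
 */
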
/*
 * Find a chunk of memory with the right size and alignment.
 */
paddr_t
pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
{
        struct mem_region *mp;
        paddr_t s, e;
        int i, j;

        size = round_page(size);

        DPRINTFN(BOOT,
            ("pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
            size, alignment, at_end));

        if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
                panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
                    alignment);

        if (at_end) {
                if (alignment != PAGE_SIZE)
                        panic("pmap_boot_find_memory: invalid ending "
                            "alignment %#" _PRIxpa, alignment);

                for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
                        s = mp->start + mp->size - size;
                        if (s >= mp->start && mp->size >= size) {
                                DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s));
                                DPRINTFN(BOOT,
                                    ("pmap_boot_find_memory: b-avail[%d] start "
                                     "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
                                     mp->start, mp->size));
                                mp->size -= size;
                                DPRINTFN(BOOT,
                                    ("pmap_boot_find_memory: a-avail[%d] start "
                                     "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
                                     mp->start, mp->size));
                                return s;
                        }
                }
                panic("pmap_boot_find_memory: no available memory");
        }

        for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
                s = (mp->start + alignment - 1) & ~(alignment-1);
                e = s + size;

                /*
                 * Is the calculated region entirely within the region?
                 */
                if (s < mp->start || e > mp->start + mp->size)
                        continue;

                DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s));
                if (s == mp->start) {
                        /*
                         * If the block starts at the beginning of region,
                         * adjust the size & start.  (The region may now be
                         * zero length.)
                         */
                        DPRINTFN(BOOT,
                            ("pmap_boot_find_memory: b-avail[%d] start "
                             "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
                        mp->start += size;
                        mp->size -= size;
                        DPRINTFN(BOOT,
                            ("pmap_boot_find_memory: a-avail[%d] start "
                             "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
                } else if (e == mp->start + mp->size) {
                        /*
                         * If the block extends to the end of the region,
                         * adjust only the size.
                         */
                        DPRINTFN(BOOT,
                            ("pmap_boot_find_memory: b-avail[%d] start "
                             "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
                        mp->size -= size;
                        DPRINTFN(BOOT,
                            ("pmap_boot_find_memory: a-avail[%d] start "
                             "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
                } else {
                        /*
                         * Block is in the middle of the region, so we
                         * have to split it in two.
                         */
                        for (j = avail_cnt; j > i + 1; j--) {
                                avail[j] = avail[j-1];
                        }
                        DPRINTFN(BOOT,
                            ("pmap_boot_find_memory: b-avail[%d] start "
                             "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
                        mp[1].start = e;
                        mp[1].size = mp[0].start + mp[0].size - e;
                        mp[0].size = s - mp[0].start;
                        avail_cnt++;
                        for (; i < avail_cnt; i++) {
                                DPRINTFN(BOOT,
                                    ("pmap_boot_find_memory: a-avail[%d] "
                                     "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
                                     avail[i].start, avail[i].size));
                        }
                }
                KASSERT(s == (uintptr_t) s);
                return s;
        }
        panic("pmap_boot_find_memory: not enough memory for "
            "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
}

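/*
 * Alignment math used above, with concrete numbers: for avail[i].start =
 * 0x00123000 and alignment = 0x10000 (64KB),
 *
 *	s = (0x00123000 + 0x10000 - 1) & ~(0x10000 - 1) = 0x00130000
 *
 * i.e. the usual round-up-to-a-power-of-two idiom; the region is then
 * trimmed or split around [s, s + size) as the three cases above show.
 */
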
/* XXXSL: we don't have any BATs to do this, map in Segment 0 1:1 using page tables */
#if defined (PMAP_OEA64_BRIDGE)
int
pmap_setup_segment0_map(int use_large_pages, ...)
{
        vaddr_t va, eva;
        register_t pte_lo = 0x0;
        int ptegidx = 0, i = 0;
        struct pte pte;
        va_list ap;

        /* Coherent + Supervisor RW, no user access */
        pte_lo = PTE_M;

        /*
         * Map the 1st segment 1:1; we'll be careful not to spill kernel
         * entries later, since those have to take priority.
         */
        for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
                ptegidx = va_to_pteg(pmap_kernel(), va);
                pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
                i = pmap_pte_insert(ptegidx, &pte);
        }

        va_start(ap, use_large_pages);
        while (1) {
                paddr_t pa;
                size_t size;

                va = va_arg(ap, vaddr_t);
                if (va == 0)
                        break;

                pa = va_arg(ap, paddr_t);
                size = va_arg(ap, size_t);

                /* Capture the end address up front so the bound doesn't move with va. */
                for (eva = va + size; va < eva; va += 0x1000, pa += 0x1000) {
                        printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__,  va, pa);
                        ptegidx = va_to_pteg(pmap_kernel(), va);
                        pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
                        i = pmap_pte_insert(ptegidx, &pte);
                }
        }
        va_end(ap);

        return 0;
}
#endif /* PMAP_OEA64_BRIDGE */

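/*
 * Calling convention sketch for pmap_setup_segment0_map() (informal; the
 * trailing varargs appear to be consumed as (va, pa, size) groups, with the
 * list terminated when a zero va is read):
 *
 *	pmap_setup_segment0_map(0,
 *	    (vaddr_t)msgbuf_vaddr, (paddr_t)msgbuf_paddr, (size_t)MSGBUFSIZE,
 *	    (vaddr_t)0);
 *
 * The names in the example are placeholders, not a real call site.
 */
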
/*
 * This is not part of the defined PMAP interface and is specific to the
 * PowerPC architecture.  This is called during initppc, before the system
 * is really initialized.
 */
void
pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
{
        struct mem_region *mp, tmp;
        paddr_t s, e;
        psize_t size;
        int i, j;

        mem_regions(&mem, &avail);

        if (pmapdebug & PMAPDEBUG_BOOT) {
                printf("pmap_bootstrap: memory configuration:\n");
                for (mp = mem; mp->size; mp++) {
                        printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
                            mp->start, mp->size);
                }
                for (mp = avail; mp->size; mp++) {
                        printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
                            mp->start, mp->size);
                }
        }

        /*
         * Find out how much physical memory we have and in how many chunks.
         */
        for (mem_cnt = 0, mp = mem; mp->size; mp++) {
                if (mp->start >= pmap_memlimit)
                        continue;
                if (mp->start + mp->size > pmap_memlimit) {
                        size = pmap_memlimit - mp->start;
                        physmem += btoc(size);
                } else {
                        physmem += btoc(mp->size);
                }
                mem_cnt++;
        }

        /*
         * Count the number of available entries.
         */
        for (avail_cnt = 0, mp = avail; mp->size; mp++)
                avail_cnt++;

        /*
         * Page align all regions.
         */
        kernelstart = trunc_page(kernelstart);
        kernelend = round_page(kernelend);
        for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
                s = round_page(mp->start);
                mp->size -= (s - mp->start);
                mp->size = trunc_page(mp->size);
                mp->start = s;
                e = mp->start + mp->size;

                DPRINTFN(BOOT,
                    ("pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
                    i, mp->start, mp->size));

                /*
                 * Don't allow the end to run beyond our artificial limit
                 */
                if (e > pmap_memlimit)
                        e = pmap_memlimit;

                /*
                 * Is this region empty or strange?  Skip it.
                 */
                if (s >= e) {
                        mp->start = 0;
                        mp->size = 0;
                }
                /*
                 * Does this overlap the beginning of the kernel?
                 * Does it extend past the end of the kernel?
                 */
                else if (s < kernelstart && e > kernelstart) {
                        if (e > kernelend) {
                                avail[avail_cnt].start = kernelend;
                                avail[avail_cnt].size = e - kernelend;
                                avail_cnt++;
                        }
                        mp->size = kernelstart - s;
                }
                /*
                 * Check whether this region overlaps the end of the kernel.
                 */
                else if (s < kernelend && e > kernelend) {
                        mp->start = kernelend;
                        mp->size = e - kernelend;
                }
                /*
                 * Look whether this region is completely inside the kernel.
                 * Nuke it if it is.
                 */
                else if (s >= kernelstart && e <= kernelend) {
                        mp->start = 0;
                        mp->size = 0;
                }
                /*
                 * If the user imposed a memory limit, enforce it.
                 */
                else if (s >= pmap_memlimit) {
                        mp->start = -PAGE_SIZE; /* let's know why */
                        mp->size = 0;
                }
                else {
                        mp->size = e - s;
                }
                DPRINTFN(BOOT,
                    ("pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
                    i, mp->start, mp->size));
        }

        /*
         * Move (and uncount) all the null regions to the end.
         */
        for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
                if (mp->size == 0) {
                        avail[i] = avail[--avail_cnt];
                        avail[avail_cnt] = avail[i];
                }
        }

        /*
         * (Bubble)sort them into ascending order.
         */
        for (i = 0; i < avail_cnt; i++) {
                for (j = i + 1; j < avail_cnt; j++) {
                        if (avail[i].start > avail[j].start) {
                                tmp = avail[i];
                                avail[i] = avail[j];
                                avail[j] = tmp;
                        }
                }
        }

        /*
         * Make sure they don't overlap.
         */
        for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
                if (mp[0].start + mp[0].size > mp[1].start) {
                        mp[0].size = mp[1].start - mp[0].start;
                }
                DPRINTFN(BOOT,
                    ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
                    i, mp->start, mp->size));
        }
        DPRINTFN(BOOT,
            ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
            i, mp->start, mp->size));

#ifdef PTEGCOUNT
        pmap_pteg_cnt = PTEGCOUNT;
#else /* PTEGCOUNT */
        pmap_pteg_cnt = 0x1000;

        while (pmap_pteg_cnt < physmem)
                pmap_pteg_cnt <<= 1;

        pmap_pteg_cnt >>= 1;
#endif /* PTEGCOUNT */

        DPRINTFN(BOOT,
            ("pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt));

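        /*
         * Sizing example (informal): with physmem = 0x8000 pages (128MB of
         * RAM at a 4KB page size), the loop above grows pmap_pteg_cnt from
         * 0x1000 to 0x8000 and the final shift brings it back to 0x4000
         * PTEGs, i.e. roughly one PTEG for every two physical pages.  On
         * 32-bit OEA a PTEG is 8 PTEs of 8 bytes (64 bytes), so the hash
         * table allocated below would be 0x4000 * 64 = 1MB.
         */
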
        /*
         * Find suitably aligned memory for PTEG hash table.
         */
        size = pmap_pteg_cnt * sizeof(struct pteg);
        pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);

        DPRINTFN(BOOT,
            ("PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n",
            pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table));

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
        if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH )
                panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB",
                    pmap_pteg_table, size);
#endif

        memset(__UNVOLATILE(pmap_pteg_table), 0,
            pmap_pteg_cnt * sizeof(struct pteg));
        pmap_pteg_mask = pmap_pteg_cnt - 1;

        /*
         * We cannot do pmap_steal_memory here since UVM hasn't been loaded
         * with pages.  So we just steal them before giving them to UVM.
         */
        size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
        pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
        if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH )
                panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB",
                    pmap_pvo_table, size);
#endif

        for (i = 0; i < pmap_pteg_cnt; i++)
                TAILQ_INIT(&pmap_pvo_table[i]);

        /*
         * Allocate msgbuf in high memory.
         */
        msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);

        for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
                paddr_t pfstart = atop(mp->start);
                paddr_t pfend = atop(mp->start + mp->size);
                if (mp->size == 0)
                        continue;
                if (mp->start + mp->size <= SEGMENT_LENGTH) {
                        uvm_page_physload(pfstart, pfend, pfstart, pfend,
                            VM_FREELIST_FIRST256);
                } else if (mp->start >= SEGMENT_LENGTH) {
                        uvm_page_physload(pfstart, pfend, pfstart, pfend,
                            VM_FREELIST_DEFAULT);
                } else {
                        pfend = atop(SEGMENT_LENGTH);
                        uvm_page_physload(pfstart, pfend, pfstart, pfend,
                            VM_FREELIST_FIRST256);
                        pfstart = atop(SEGMENT_LENGTH);
                        pfend = atop(mp->start + mp->size);
                        uvm_page_physload(pfstart, pfend, pfstart, pfend,
                            VM_FREELIST_DEFAULT);
                }
        }

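        /*
         * The split above mirrors the policy described at
         * pmap_steal_memory(): pages below SEGMENT_LENGTH (256MB) go onto
         * VM_FREELIST_FIRST256 so that pmap metadata, which needs to stay
         * reachable through the low 1:1/BAT mapping, can be allocated from
         * them, while everything above 256MB goes onto the default freelist
         * for general use.
         */
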
        /*
         * Make sure kernel vsid is allocated as well as VSID 0.
         */
        pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
                |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
        pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
                |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
        pmap_vsid_bitmap[0] |= 1;

        /*
         * Initialize kernel pmap and hardware.
         */

        /* PMAP_OEA64_BRIDGE does support these instructions */
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
        for (i = 0; i < 16; i++) {
                pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
                __asm volatile ("mtsrin %0,%1"
                    :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
        }

        pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
        __asm volatile ("mtsr %0,%1"
            :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));

        pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
        __asm volatile ("mtsr %0,%1"
            :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
#endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
#if defined (PMAP_OEA)
        for (i = 0; i < 16; i++) {
                if (iosrtable[i] & SR601_T) {
                        pmap_kernel()->pm_sr[i] = iosrtable[i];
                        __asm volatile ("mtsrin %0,%1"
                            :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
                }
        }
        __asm volatile ("sync; mtsdr1 %0; isync"
            :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
        __asm __volatile ("sync; mtsdr1 %0; isync"
            :: "r"((uintptr_t)pmap_pteg_table | (32 - cntlzw(pmap_pteg_mask >> 11))));
#endif
        tlbia();

#ifdef ALTIVEC
        pmap_use_altivec = cpu_altivec;
#endif

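        /*
         * SDR1 layout note (informal): for 32-bit OEA the value written
         * above is HTABORG | HTABMASK.  The hash table is naturally aligned
         * to its size (it was allocated with alignment == size), so the low
         * bits of its address are zero, and HTABMASK is expressed in units
         * of 1024 PTEGs, which is exactly pmap_pteg_mask >> 10 (e.g. 0x4000
         * PTEGs gives a mask of 0x3fff and an HTABMASK of 0xf).  In the
         * 64-bit bridge case the field is instead HTABSIZE =
         * log2(PTEG count) - 11, which is what the
         * "32 - cntlzw(pmap_pteg_mask >> 11)" expression computes.
         */
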
        if (pmapdebug & PMAPDEBUG_BOOT) {
                u_int cnt;
                int bank;
                char pbuf[9];
                for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
                        cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
                        printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
                            bank,
                            ptoa(vm_physmem[bank].avail_start),
                            ptoa(vm_physmem[bank].avail_end),
                            ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
                }
                format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
                printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
                    pbuf, cnt);
        }

        pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
            sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
            &pmap_pool_uallocator, IPL_VM);

        pool_setlowat(&pmap_upvo_pool, 252);

        pool_init(&pmap_pool, sizeof(struct pmap),
            sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
            IPL_NONE);

#if defined(PMAP_NEED_MAPKERNEL) || 1
        {
                struct pmap *pm = pmap_kernel();
#if defined(PMAP_NEED_FULL_MAPKERNEL)
                extern int etext[], kernel_text[];
                vaddr_t va, va_etext = (paddr_t) etext;
#endif
                paddr_t pa, pa_end;
                register_t sr;
                struct pte pt;
                unsigned int ptegidx;
                int bank;

                sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;

                for (bank = 0; bank < vm_nphysseg; bank++) {
                        pa_end = ptoa(vm_physmem[bank].avail_end);
                        pa = ptoa(vm_physmem[bank].avail_start);
                        for (; pa < pa_end; pa += PAGE_SIZE) {
                                ptegidx = va_to_pteg(pm, pa);
                                pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
                                pmap_pte_insert(ptegidx, &pt);
                        }
                }

#if defined(PMAP_NEED_FULL_MAPKERNEL)
                va = (vaddr_t) kernel_text;

                for (pa = kernelstart; va < va_etext;
                    pa += PAGE_SIZE, va += PAGE_SIZE) {
                        ptegidx = va_to_pteg(pm, va);
                        pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
                        pmap_pte_insert(ptegidx, &pt);
                }

                for (; pa < kernelend;
                    pa += PAGE_SIZE, va += PAGE_SIZE) {
                        ptegidx = va_to_pteg(pm, va);
                        pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
                        pmap_pte_insert(ptegidx, &pt);
                }

                for (va = 0, pa = 0; va < kernelstart;
                    pa += PAGE_SIZE, va += PAGE_SIZE) {
                        ptegidx = va_to_pteg(pm, va);
                        if (va < 0x3000)
                                pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
                        else
                                pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
                        pmap_pte_insert(ptegidx, &pt);
                }
                for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH;
                    pa += PAGE_SIZE, va += PAGE_SIZE) {
                        ptegidx = va_to_pteg(pm, va);
                        pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
                        pmap_pte_insert(ptegidx, &pt);
                }
#endif

                __asm volatile ("mtsrin %0,%1"
                    :: "r"(sr), "r"(kernelstart));