[netbsd-mini2440.git] / sys / arch / powerpc / oea / pmap.c
1 /* $NetBSD: pmap.c,v 1.68 2009/11/07 07:27:46 cegger Exp $ */
2 /*-
3 * Copyright (c) 2001 The NetBSD Foundation, Inc.
4 * All rights reserved.
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
9 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
10 * of Kyma Systems LLC.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
35 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
36 * Copyright (C) 1995, 1996 TooLs GmbH.
37 * All rights reserved.
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by TooLs GmbH.
50 * 4. The name of TooLs GmbH may not be used to endorse or promote products
51 * derived from this software without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
58 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
59 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
60 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
61 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
62 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 #include <sys/cdefs.h>
66 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.68 2009/11/07 07:27:46 cegger Exp $");
68 #define PMAP_NOOPNAMES
70 #include "opt_ppcarch.h"
71 #include "opt_altivec.h"
72 #include "opt_multiprocessor.h"
73 #include "opt_pmap.h"
75 #include <sys/param.h>
76 #include <sys/malloc.h>
77 #include <sys/proc.h>
78 #include <sys/pool.h>
79 #include <sys/queue.h>
80 #include <sys/device.h> /* for evcnt */
81 #include <sys/systm.h>
82 #include <sys/atomic.h>
84 #include <uvm/uvm.h>
86 #include <machine/pcb.h>
87 #include <machine/powerpc.h>
88 #include <powerpc/spr.h>
89 #include <powerpc/oea/sr_601.h>
90 #include <powerpc/bat.h>
91 #include <powerpc/stdarg.h>
93 #ifdef ALTIVEC
94 int pmap_use_altivec;
95 #endif
97 volatile struct pteg *pmap_pteg_table;
98 unsigned int pmap_pteg_cnt;
99 unsigned int pmap_pteg_mask;
100 #ifdef PMAP_MEMLIMIT
101 static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
102 #else
103 static paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */
104 #endif
106 struct pmap kernel_pmap_;
107 unsigned int pmap_pages_stolen;
108 u_long pmap_pte_valid;
109 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
110 u_long pmap_pvo_enter_depth;
111 u_long pmap_pvo_remove_depth;
112 #endif
114 #ifndef MSGBUFADDR
115 extern paddr_t msgbuf_paddr;
116 #endif
118 static struct mem_region *mem, *avail;
119 static u_int mem_cnt, avail_cnt;
121 #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
122 # define PMAP_OEA 1
123 # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA64) && !defined(PPC_OEA64_BRIDGE)
124 # define PMAPNAME(name) pmap_##name
125 # endif
126 #endif
128 #if defined(PMAP_OEA64)
129 # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA) && !defined(PPC_OEA64_BRIDGE)
130 # define PMAPNAME(name) pmap_##name
131 # endif
132 #endif
134 #if defined(PMAP_OEA64_BRIDGE)
135 # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA) && !defined(PPC_OEA64)
136 # define PMAPNAME(name) pmap_##name
137 # endif
138 #endif
140 #if defined(PMAP_OEA)
141 #define _PRIxpte "lx"
142 #else
143 #define _PRIxpte PRIx64
144 #endif
145 #define _PRIxpa "lx"
146 #define _PRIxva "lx"
147 #define _PRIsr "lx"
149 #if defined(PMAP_EXCLUDE_DECLS) && !defined(PMAPNAME)
150 #if defined(PMAP_OEA)
151 #define PMAPNAME(name) pmap32_##name
152 #elif defined(PMAP_OEA64)
153 #define PMAPNAME(name) pmap64_##name
154 #elif defined(PMAP_OEA64_BRIDGE)
155 #define PMAPNAME(name) pmap64bridge_##name
156 #else
157 #error unknown variant for pmap
158 #endif
159 #endif /* PMAP_EXCLUDE_DECLS && !PMAPNAME */
161 #if defined(PMAPNAME)
162 #define STATIC static
163 #define pmap_pte_spill PMAPNAME(pte_spill)
164 #define pmap_real_memory PMAPNAME(real_memory)
165 #define pmap_init PMAPNAME(init)
166 #define pmap_virtual_space PMAPNAME(virtual_space)
167 #define pmap_create PMAPNAME(create)
168 #define pmap_reference PMAPNAME(reference)
169 #define pmap_destroy PMAPNAME(destroy)
170 #define pmap_copy PMAPNAME(copy)
171 #define pmap_update PMAPNAME(update)
172 #define pmap_enter PMAPNAME(enter)
173 #define pmap_remove PMAPNAME(remove)
174 #define pmap_kenter_pa PMAPNAME(kenter_pa)
175 #define pmap_kremove PMAPNAME(kremove)
176 #define pmap_extract PMAPNAME(extract)
177 #define pmap_protect PMAPNAME(protect)
178 #define pmap_unwire PMAPNAME(unwire)
179 #define pmap_page_protect PMAPNAME(page_protect)
180 #define pmap_query_bit PMAPNAME(query_bit)
181 #define pmap_clear_bit PMAPNAME(clear_bit)
183 #define pmap_activate PMAPNAME(activate)
184 #define pmap_deactivate PMAPNAME(deactivate)
186 #define pmap_pinit PMAPNAME(pinit)
187 #define pmap_procwr PMAPNAME(procwr)
189 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
190 #define pmap_pte_print PMAPNAME(pte_print)
191 #define pmap_pteg_check PMAPNAME(pteg_check)
192 #define pmap_print_mmuregs PMAPNAME(print_mmuregs)
193 #define pmap_print_pte PMAPNAME(print_pte)
194 #define pmap_pteg_dist PMAPNAME(pteg_dist)
195 #endif
196 #if defined(DEBUG) || defined(PMAPCHECK)
197 #define pmap_pvo_verify PMAPNAME(pvo_verify)
198 #define pmapcheck PMAPNAME(check)
199 #endif
200 #if defined(DEBUG) || defined(PMAPDEBUG)
201 #define pmapdebug PMAPNAME(debug)
202 #endif
203 #define pmap_steal_memory PMAPNAME(steal_memory)
204 #define pmap_bootstrap PMAPNAME(bootstrap)
205 #else
206 #define STATIC /* nothing */
207 #endif /* PMAPNAME */
209 STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
210 STATIC void pmap_real_memory(paddr_t *, psize_t *);
211 STATIC void pmap_init(void);
212 STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
213 STATIC pmap_t pmap_create(void);
214 STATIC void pmap_reference(pmap_t);
215 STATIC void pmap_destroy(pmap_t);
216 STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
217 STATIC void pmap_update(pmap_t);
218 STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
219 STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
220 STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
221 STATIC void pmap_kremove(vaddr_t, vsize_t);
222 STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
224 STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
225 STATIC void pmap_unwire(pmap_t, vaddr_t);
226 STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
227 STATIC bool pmap_query_bit(struct vm_page *, int);
228 STATIC bool pmap_clear_bit(struct vm_page *, int);
230 STATIC void pmap_activate(struct lwp *);
231 STATIC void pmap_deactivate(struct lwp *);
233 STATIC void pmap_pinit(pmap_t pm);
234 STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);
236 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
237 STATIC void pmap_pte_print(volatile struct pte *);
238 STATIC void pmap_pteg_check(void);
239 STATIC void pmap_print_mmuregs(void);
240 STATIC void pmap_print_pte(pmap_t, vaddr_t);
241 STATIC void pmap_pteg_dist(void);
242 #endif
243 #if defined(DEBUG) || defined(PMAPCHECK)
244 STATIC void pmap_pvo_verify(void);
245 #endif
246 STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
247 STATIC void pmap_bootstrap(paddr_t, paddr_t);
249 #ifdef PMAPNAME
250 const struct pmap_ops PMAPNAME(ops) = {
251 .pmapop_pte_spill = pmap_pte_spill,
252 .pmapop_real_memory = pmap_real_memory,
253 .pmapop_init = pmap_init,
254 .pmapop_virtual_space = pmap_virtual_space,
255 .pmapop_create = pmap_create,
256 .pmapop_reference = pmap_reference,
257 .pmapop_destroy = pmap_destroy,
258 .pmapop_copy = pmap_copy,
259 .pmapop_update = pmap_update,
260 .pmapop_enter = pmap_enter,
261 .pmapop_remove = pmap_remove,
262 .pmapop_kenter_pa = pmap_kenter_pa,
263 .pmapop_kremove = pmap_kremove,
264 .pmapop_extract = pmap_extract,
265 .pmapop_protect = pmap_protect,
266 .pmapop_unwire = pmap_unwire,
267 .pmapop_page_protect = pmap_page_protect,
268 .pmapop_query_bit = pmap_query_bit,
269 .pmapop_clear_bit = pmap_clear_bit,
270 .pmapop_activate = pmap_activate,
271 .pmapop_deactivate = pmap_deactivate,
272 .pmapop_pinit = pmap_pinit,
273 .pmapop_procwr = pmap_procwr,
274 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
275 .pmapop_pte_print = pmap_pte_print,
276 .pmapop_pteg_check = pmap_pteg_check,
277 .pmapop_print_mmuregs = pmap_print_mmuregs,
278 .pmapop_print_pte = pmap_print_pte,
279 .pmapop_pteg_dist = pmap_pteg_dist,
280 #else
281 .pmapop_pte_print = NULL,
282 .pmapop_pteg_check = NULL,
283 .pmapop_print_mmuregs = NULL,
284 .pmapop_print_pte = NULL,
285 .pmapop_pteg_dist = NULL,
286 #endif
287 #if defined(DEBUG) || defined(PMAPCHECK)
288 .pmapop_pvo_verify = pmap_pvo_verify,
289 #else
290 .pmapop_pvo_verify = NULL,
291 #endif
292 .pmapop_steal_memory = pmap_steal_memory,
293 .pmapop_bootstrap = pmap_bootstrap,
294 };
295 #endif /* PMAPNAME */
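/*
 * Illustrative sketch, not part of the original source: with
 * PMAP_EXCLUDE_DECLS, each OEA variant compiles this file under its own
 * PMAPNAME() prefix and exports a pmap_ops vector, so early MD code can
 * pick a variant at boot and dispatch through it.  The helper name
 * oea_pmap_select() and the selection criterion are hypothetical.
 */
#if 0
extern const struct pmap_ops pmap32_ops, pmap64bridge_ops;
static const struct pmap_ops *pmapops;

static void
oea_pmap_select(bool has_64bit_bridge)
{
	pmapops = has_64bit_bridge ? &pmap64bridge_ops : &pmap32_ops;
	pmapops->pmapop_init();	/* later calls go through the vector */
}
#endif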
298 * The following structure is aligned to 32 bytes
300 struct pvo_entry {
301 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */
302 TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */
303 struct pte pvo_pte; /* Prebuilt PTE */
304 pmap_t pvo_pmap; /* ptr to owning pmap */
305 vaddr_t pvo_vaddr; /* VA of entry */
306 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */
307 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */
308 #define PVO_WIRED 0x0010 /* PVO entry is wired */
309 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */
310 #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */
311 #define PVO_WIRED_P(pvo) ((pvo)->pvo_vaddr & PVO_WIRED)
312 #define PVO_MANAGED_P(pvo) ((pvo)->pvo_vaddr & PVO_MANAGED)
313 #define PVO_EXECUTABLE_P(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
314 #define PVO_ENTER_INSERT 0 /* PVO has been removed */
315 #define PVO_SPILL_UNSET 1 /* PVO has been evicted */
316 #define PVO_SPILL_SET 2 /* PVO has been spilled */
317 #define PVO_SPILL_INSERT 3 /* PVO has been inserted */
318 #define PVO_PMAP_PAGE_PROTECT 4 /* PVO has changed */
319 #define PVO_PMAP_PROTECT 5 /* PVO has changed */
320 #define PVO_REMOVE 6 /* PVO has been removed */
321 #define PVO_WHERE_MASK 15
322 #define PVO_WHERE_SHFT 8
323 } __attribute__ ((aligned (32)));
324 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF)
325 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
326 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
327 #define PVO_PTEGIDX_CLR(pvo) \
328 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
329 #define PVO_PTEGIDX_SET(pvo,i) \
330 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
331 #define PVO_WHERE(pvo,w) \
332 ((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
333 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
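/*
 * Because pvo_vaddr is page-aligned, its low 12 bits are free for
 * bookkeeping: bits 0-2 hold the PTE slot within the PTEG, bit 3 marks
 * that slot valid, bits 4-6 are the WIRED/MANAGED/EXECUTABLE flags, and
 * bits 8-11 record where the PVO last changed state.  A minimal sketch
 * of the round trip (illustrative only, not part of the original file):
 */
#if 0
static void
pvo_vaddr_packing_example(struct pvo_entry *pvo, vaddr_t va)
{
	pvo->pvo_vaddr = va & ~ADDR_POFF;	/* keep only the page VA */
	PVO_PTEGIDX_SET(pvo, 5);		/* resident in PTE slot 5 */
	PVO_WHERE(pvo, ENTER_INSERT);		/* state-change breadcrumb */
	KASSERT(PVO_VADDR(pvo) == (va & ~ADDR_POFF));
	KASSERT(PVO_PTEGIDX_GET(pvo) == 5);
	KASSERT(PVO_PTEGIDX_ISSET(pvo));
}
#endif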
335 TAILQ_HEAD(pvo_tqhead, pvo_entry);
336 struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */
337 static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */
338 static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */
340 struct pool pmap_pool; /* pool for pmap structures */
341 struct pool pmap_upvo_pool; /* pool for pvo entries for unmanaged pages */
342 struct pool pmap_mpvo_pool; /* pool for pvo entries for managed pages */
345 * We keep a cache of unmanaged pages to be used for pvo entries for
346 * unmanaged pages.
348 struct pvo_page {
349 SIMPLEQ_ENTRY(pvo_page) pvop_link;
350 };
351 SIMPLEQ_HEAD(pvop_head, pvo_page);
352 static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
353 static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
354 u_long pmap_upvop_free;
355 u_long pmap_upvop_maxfree;
356 u_long pmap_mpvop_free;
357 u_long pmap_mpvop_maxfree;
359 static void *pmap_pool_ualloc(struct pool *, int);
360 static void *pmap_pool_malloc(struct pool *, int);
362 static void pmap_pool_ufree(struct pool *, void *);
363 static void pmap_pool_mfree(struct pool *, void *);
365 static struct pool_allocator pmap_pool_mallocator = {
366 .pa_alloc = pmap_pool_malloc,
367 .pa_free = pmap_pool_mfree,
368 .pa_pagesz = 0,
369 };
371 static struct pool_allocator pmap_pool_uallocator = {
372 .pa_alloc = pmap_pool_ualloc,
373 .pa_free = pmap_pool_ufree,
374 .pa_pagesz = 0,
375 };
377 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
378 void pmap_pte_print(volatile struct pte *);
379 void pmap_pteg_check(void);
380 void pmap_pteg_dist(void);
381 void pmap_print_pte(pmap_t, vaddr_t);
382 void pmap_print_mmuregs(void);
383 #endif
385 #if defined(DEBUG) || defined(PMAPCHECK)
386 #ifdef PMAPCHECK
387 int pmapcheck = 1;
388 #else
389 int pmapcheck = 0;
390 #endif
391 void pmap_pvo_verify(void);
392 static void pmap_pvo_check(const struct pvo_entry *);
393 #define PMAP_PVO_CHECK(pvo) \
394 do { \
395 if (pmapcheck) \
396 pmap_pvo_check(pvo); \
397 } while (0)
398 #else
399 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0)
400 #endif
401 static int pmap_pte_insert(int, struct pte *);
402 static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
403 vaddr_t, paddr_t, register_t, int);
404 static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
405 static void pmap_pvo_free(struct pvo_entry *);
406 static void pmap_pvo_free_list(struct pvo_head *);
407 static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
408 static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
409 static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
410 static void pvo_set_exec(struct pvo_entry *);
411 static void pvo_clear_exec(struct pvo_entry *);
413 static void tlbia(void);
415 static void pmap_release(pmap_t);
416 static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);
418 static uint32_t pmap_pvo_reclaim_nextidx;
419 #ifdef DEBUG
420 static int pmap_pvo_reclaim_debugctr;
421 #endif
423 #define VSID_NBPW (sizeof(uint32_t) * 8)
424 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
426 static int pmap_initialized;
428 #if defined(DEBUG) || defined(PMAPDEBUG)
429 #define PMAPDEBUG_BOOT 0x0001
430 #define PMAPDEBUG_PTE 0x0002
431 #define PMAPDEBUG_EXEC 0x0008
432 #define PMAPDEBUG_PVOENTER 0x0010
433 #define PMAPDEBUG_PVOREMOVE 0x0020
434 #define PMAPDEBUG_ACTIVATE 0x0100
435 #define PMAPDEBUG_CREATE 0x0200
436 #define PMAPDEBUG_ENTER 0x1000
437 #define PMAPDEBUG_KENTER 0x2000
438 #define PMAPDEBUG_KREMOVE 0x4000
439 #define PMAPDEBUG_REMOVE 0x8000
441 unsigned int pmapdebug = 0;
443 # define DPRINTF(x) printf x
444 # define DPRINTFN(n, x) if (pmapdebug & PMAPDEBUG_ ## n) printf x
445 #else
446 # define DPRINTF(x)
447 # define DPRINTFN(n, x)
448 #endif
451 #ifdef PMAPCOUNTERS
453 * From pmap_subr.c
455 extern struct evcnt pmap_evcnt_mappings;
456 extern struct evcnt pmap_evcnt_unmappings;
458 extern struct evcnt pmap_evcnt_kernel_mappings;
459 extern struct evcnt pmap_evcnt_kernel_unmappings;
461 extern struct evcnt pmap_evcnt_mappings_replaced;
463 extern struct evcnt pmap_evcnt_exec_mappings;
464 extern struct evcnt pmap_evcnt_exec_cached;
466 extern struct evcnt pmap_evcnt_exec_synced;
467 extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
468 extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;
470 extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
471 extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
472 extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
473 extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
474 extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;
476 extern struct evcnt pmap_evcnt_updates;
477 extern struct evcnt pmap_evcnt_collects;
478 extern struct evcnt pmap_evcnt_copies;
480 extern struct evcnt pmap_evcnt_ptes_spilled;
481 extern struct evcnt pmap_evcnt_ptes_unspilled;
482 extern struct evcnt pmap_evcnt_ptes_evicted;
484 extern struct evcnt pmap_evcnt_ptes_primary[8];
485 extern struct evcnt pmap_evcnt_ptes_secondary[8];
486 extern struct evcnt pmap_evcnt_ptes_removed;
487 extern struct evcnt pmap_evcnt_ptes_changed;
488 extern struct evcnt pmap_evcnt_pvos_reclaimed;
489 extern struct evcnt pmap_evcnt_pvos_failed;
491 extern struct evcnt pmap_evcnt_zeroed_pages;
492 extern struct evcnt pmap_evcnt_copied_pages;
493 extern struct evcnt pmap_evcnt_idlezeroed_pages;
495 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++)
496 #define PMAPCOUNT2(ev) ((ev).ev_count++)
497 #else
498 #define PMAPCOUNT(ev) ((void) 0)
499 #define PMAPCOUNT2(ev) ((void) 0)
500 #endif
502 #define TLBIE(va) __asm volatile("tlbie %0" :: "r"(va))
504 /* XXXSL: this needs to be moved to assembler */
505 #define TLBIEL(va) __asm __volatile("tlbie %0" :: "r"(va))
507 #define TLBSYNC() __asm volatile("tlbsync")
508 #define SYNC() __asm volatile("sync")
509 #define EIEIO() __asm volatile("eieio")
510 #define DCBST(va) __asm __volatile("dcbst 0,%0" :: "r"(va))
511 #define MFMSR() mfmsr()
512 #define MTMSR(psl) mtmsr(psl)
513 #define MFPVR() mfpvr()
514 #define MFSRIN(va) mfsrin(va)
515 #define MFTB() mfrtcltbl()
517 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
518 static inline register_t
519 mfsrin(vaddr_t va)
520 {
521 register_t sr;
522 __asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
523 return sr;
524 }
525 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
527 #if defined (PMAP_OEA64_BRIDGE)
528 extern void mfmsr64 (register64_t *result);
529 #endif /* PMAP_OEA64_BRIDGE */
531 #define PMAP_LOCK() KERNEL_LOCK(1, NULL)
532 #define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL)
534 static inline register_t
535 pmap_interrupts_off(void)
536 {
537 register_t msr = MFMSR();
538 if (msr & PSL_EE)
539 MTMSR(msr & ~PSL_EE);
540 return msr;
541 }
543 static void
544 pmap_interrupts_restore(register_t msr)
545 {
546 if (msr & PSL_EE)
547 MTMSR(msr);
548 }
550 static inline u_int32_t
551 mfrtcltbl(void)
552 {
553 #ifdef PPC_OEA601
554 if ((MFPVR() >> 16) == MPC601)
555 return (mfrtcl() >> 7);
556 else
557 #endif
558 return (mftbl());
559 }
562 * These small routines may have to be replaced,
563 * if/when we support processors other than the 604.
566 void
567 tlbia(void)
568 {
569 char *i;
571 SYNC();
572 #if defined(PMAP_OEA)
574 * Why not use "tlbia"? Because not all processors implement it.
576 * This needs to be a per-CPU callback to do the appropriate thing
577 * for the CPU. XXX
579 for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
580 TLBIE(i);
581 EIEIO();
582 SYNC();
583 }
584 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
585 /* This is specifically for the 970, 970UM v1.6 pp. 140. */
586 for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
587 TLBIEL(i);
588 EIEIO();
589 SYNC();
590 }
591 #endif
592 TLBSYNC();
593 SYNC();
594 }
596 static inline register_t
597 va_to_vsid(const struct pmap *pm, vaddr_t addr)
598 {
599 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
600 return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
601 #else /* PMAP_OEA64 */
602 #if 0
603 const struct ste *ste;
604 register_t hash;
605 int i;
607 hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;
610 * Try the primary group first
612 ste = pm->pm_stes[hash].stes;
613 for (i = 0; i < 8; i++, ste++) {
614 if ((ste->ste_hi & STE_V) &&
615 (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
616 return ste;
617 }
620 * Then the secondary group.
622 ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
623 for (i = 0; i < 8; i++, ste++) {
624 if ((ste->ste_hi & STE_V) &&
625 (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
626 return addr;
627 }
629 return NULL;
630 #else
632 * Rather than searching the STE groups for the VSID, we know
633 * how we generate that from the ESID and so do that.
635 return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
636 #endif
637 #endif /* PMAP_OEA */
638 }
640 static inline register_t
641 va_to_pteg(const struct pmap *pm, vaddr_t addr)
642 {
643 register_t hash;
645 hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
646 return hash & pmap_pteg_mask;
647 }
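/*
 * Worked example (numbers illustrative, not from the original source):
 * with an 8192-PTEG page table, pmap_pteg_mask is 0x1fff.  For va
 * 0xdead1000 the page index is (va & ADDR_PIDX) >> ADDR_PIDX_SHFT ==
 * 0xead1, so the mapping hashes to (vsid ^ 0xead1) & 0x1fff.  The
 * secondary group is always that index XORed with pmap_pteg_mask,
 * which is how pmap_pte_insert() below probes both groups.
 */
#if 0
static void
va_to_pteg_example(const struct pmap *pm)
{
	register_t pri = va_to_pteg(pm, (vaddr_t)0xdead1000);
	register_t sec = pri ^ pmap_pteg_mask;
	KASSERT(pri <= pmap_pteg_mask && sec <= pmap_pteg_mask);
}
#endif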
649 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
651 * Given a PTE in the page table, calculate the VADDR that hashes to it.
652 * The only bit of magic is that the top 4 bits of the address don't
653 * technically exist in the PTE. But we know we reserved 4 bits of the
654 * VSID for it so that's how we get it.
656 static vaddr_t
657 pmap_pte_to_va(volatile const struct pte *pt)
659 vaddr_t va;
660 uintptr_t ptaddr = (uintptr_t) pt;
662 if (pt->pte_hi & PTE_HID)
663 ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
665 /* PPC Bits 10-19 PPC64 Bits 42-51 */
666 #if defined(PMAP_OEA)
667 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
668 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
669 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
670 #endif
671 va <<= ADDR_PIDX_SHFT;
673 /* PPC Bits 4-9 PPC64 Bits 36-41 */
674 va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
676 #if defined(PMAP_OEA64)
677 /* PPC64 Bits 0-35 */
678 /* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
679 #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
680 /* PPC Bits 0-3 */
681 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
682 #endif
684 return va;
686 #endif
688 static inline struct pvo_head *
689 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
691 struct vm_page *pg;
693 pg = PHYS_TO_VM_PAGE(pa);
694 if (pg_p != NULL)
695 *pg_p = pg;
696 if (pg == NULL)
697 return &pmap_pvo_unmanaged;
698 return &pg->mdpage.mdpg_pvoh;
701 static inline struct pvo_head *
702 vm_page_to_pvoh(struct vm_page *pg)
704 return &pg->mdpage.mdpg_pvoh;
708 static inline void
709 pmap_attr_clear(struct vm_page *pg, int ptebit)
711 pg->mdpage.mdpg_attrs &= ~ptebit;
714 static inline int
715 pmap_attr_fetch(struct vm_page *pg)
717 return pg->mdpage.mdpg_attrs;
720 static inline void
721 pmap_attr_save(struct vm_page *pg, int ptebit)
723 pg->mdpage.mdpg_attrs |= ptebit;
726 static inline int
727 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
729 if (pt->pte_hi == pvo_pt->pte_hi
730 #if 0
731 && ((pt->pte_lo ^ pvo_pt->pte_lo) &
732 ~(PTE_REF|PTE_CHG)) == 0
733 #endif
735 return 1;
736 return 0;
739 static inline void
740 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
743 * Construct the PTE. Default to IMB initially. Valid bit
744 * only gets set when the real pte is set in memory.
746 * Note: Don't set the valid bit for correct operation of tlb update.
748 #if defined(PMAP_OEA)
749 pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
750 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
751 pt->pte_lo = pte_lo;
752 #elif defined (PMAP_OEA64_BRIDGE)
753 pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
754 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
755 pt->pte_lo = (u_int64_t) pte_lo;
756 #elif defined (PMAP_OEA64)
757 #error PMAP_OEA64 not supported
758 #endif /* PMAP_OEA */
761 static inline void
762 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
764 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
767 static inline void
768 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
771 * As shown in Section 7.6.3.2.3
773 pt->pte_lo &= ~ptebit;
774 TLBIE(va);
775 SYNC();
776 EIEIO();
777 TLBSYNC();
778 SYNC();
779 #ifdef MULTIPROCESSOR
780 DCBST(pt);
781 #endif
784 static inline void
785 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
787 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
788 if (pvo_pt->pte_hi & PTE_VALID)
789 panic("pte_set: setting an already valid pte %p", pvo_pt);
790 #endif
791 pvo_pt->pte_hi |= PTE_VALID;
794 * Update the PTE as defined in section 7.6.3.1
795 * Note that the REF/CHG bits are from pvo_pt and thus should
796 * have been saved so this routine can restore them (if desired).
798 pt->pte_lo = pvo_pt->pte_lo;
799 EIEIO();
800 pt->pte_hi = pvo_pt->pte_hi;
801 TLBSYNC();
802 SYNC();
803 #ifdef MULTIPROCESSOR
804 DCBST(pt);
805 #endif
806 pmap_pte_valid++;
809 static inline void
810 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
812 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
813 if ((pvo_pt->pte_hi & PTE_VALID) == 0)
814 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
815 if ((pt->pte_hi & PTE_VALID) == 0)
816 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
817 #endif
819 pvo_pt->pte_hi &= ~PTE_VALID;
821 * Force the ref & chg bits back into the PTEs.
823 SYNC();
825 * Invalidate the pte ... (Section 7.6.3.3)
827 pt->pte_hi &= ~PTE_VALID;
828 SYNC();
829 TLBIE(va);
830 SYNC();
831 EIEIO();
832 TLBSYNC();
833 SYNC();
835 * Save the ref & chg bits ...
837 pmap_pte_synch(pt, pvo_pt);
838 pmap_pte_valid--;
841 static inline void
842 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
845 * Invalidate the PTE
847 pmap_pte_unset(pt, pvo_pt, va);
848 pmap_pte_set(pt, pvo_pt);
852 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
853 * (either primary or secondary location).
855 * Note: both the destination and source PTEs must not have PTE_VALID set.
858 static int
859 pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
861 volatile struct pte *pt;
862 int i;
864 #if defined(DEBUG)
865 DPRINTFN(PTE, ("pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
866 ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
867 #endif
869 * First try primary hash.
871 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
872 if ((pt->pte_hi & PTE_VALID) == 0) {
873 pvo_pt->pte_hi &= ~PTE_HID;
874 pmap_pte_set(pt, pvo_pt);
875 return i;
880 * Now try secondary hash.
882 ptegidx ^= pmap_pteg_mask;
883 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
884 if ((pt->pte_hi & PTE_VALID) == 0) {
885 pvo_pt->pte_hi |= PTE_HID;
886 pmap_pte_set(pt, pvo_pt);
887 return i;
890 return -1;
891 }
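/*
 * The primary and secondary groups form an involution: idx and
 * idx ^ pmap_pteg_mask map to each other, and PTE_HID in pte_hi records
 * which one a resident entry used.  A sketch (illustrative only) of the
 * recovery step that pmap_pvo_to_pte() performs further down:
 */
#if 0
static volatile struct pte *
pte_slot_example(int ptegidx, int slot, int hid)
{
	if (hid)
		ptegidx ^= pmap_pteg_mask;	/* hop to the other group */
	return &pmap_pteg_table[ptegidx].pt[slot];
}
#endif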
894 * Spill handler.
896 * Tries to spill a page table entry from the overflow area.
897 * This runs in either real mode (if dealing with a exception spill)
898 * or virtual mode when dealing with manually spilling one of the
899 * kernel's pte entries. In either case, interrupts are already
900 * disabled.
904 pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
906 struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
907 struct pvo_entry *pvo;
908 /* XXX: gcc -- vpvoh is always set at either *1* or *2* */
909 struct pvo_tqhead *pvoh, *vpvoh = NULL;
910 int ptegidx, i, j;
911 volatile struct pteg *pteg;
912 volatile struct pte *pt;
914 PMAP_LOCK();
916 ptegidx = va_to_pteg(pm, addr);
919 * Have to substitute some entry. Use the primary hash for this.
920 * Use low bits of timebase as random generator. Make sure we are
921 * not picking a kernel pte for replacement.
923 pteg = &pmap_pteg_table[ptegidx];
924 i = MFTB() & 7;
925 for (j = 0; j < 8; j++) {
926 pt = &pteg->pt[i];
927 if ((pt->pte_hi & PTE_VALID) == 0)
928 break;
929 if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
930 < PHYSMAP_VSIDBITS)
931 break;
932 i = (i + 1) & 7;
934 KASSERT(j < 8);
936 source_pvo = NULL;
937 victim_pvo = NULL;
938 pvoh = &pmap_pvo_table[ptegidx];
939 TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
942 * We need to find pvo entry for this address...
944 PMAP_PVO_CHECK(pvo); /* sanity check */
947 * If we haven't found the source and we come to a PVO with
948 * a valid PTE, then we know we can't find it because all
949 * evicted PVOs always are first in the list.
951 if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
952 break;
953 if (source_pvo == NULL && pm == pvo->pvo_pmap &&
954 addr == PVO_VADDR(pvo)) {
957 * Now we have found the entry to be spilled into the
958 * pteg. Attempt to insert it into the page table.
960 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
961 if (j >= 0) {
962 PVO_PTEGIDX_SET(pvo, j);
963 PMAP_PVO_CHECK(pvo); /* sanity check */
964 PVO_WHERE(pvo, SPILL_INSERT);
965 pvo->pvo_pmap->pm_evictions--;
966 PMAPCOUNT(ptes_spilled);
967 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
968 ? pmap_evcnt_ptes_secondary
969 : pmap_evcnt_ptes_primary)[j]);
972 * Since we keep the evicted entries at the
973 * front of the PVO list, we need to move this
974 * (now resident) PVO after the evicted
975 * entries.
977 next_pvo = TAILQ_NEXT(pvo, pvo_olink);
980 * If we don't have to move (either we were the
981 * last entry or the next entry was valid),
982 * don't change our position. Otherwise
983 * move ourselves to the tail of the queue.
985 if (next_pvo != NULL &&
986 !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
987 TAILQ_REMOVE(pvoh, pvo, pvo_olink);
988 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
990 PMAP_UNLOCK();
991 return 1;
993 source_pvo = pvo;
994 if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
995 return 0;
997 if (victim_pvo != NULL)
998 break;
1002 * We also need the pvo entry of the victim we are replacing
1003 * so save the R & C bits of the PTE.
1005 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
1006 pmap_pte_compare(pt, &pvo->pvo_pte)) {
1007 vpvoh = pvoh; /* *1* */
1008 victim_pvo = pvo;
1009 if (source_pvo != NULL)
1010 break;
1014 if (source_pvo == NULL) {
1015 PMAPCOUNT(ptes_unspilled);
1016 PMAP_UNLOCK();
1017 return 0;
1020 if (victim_pvo == NULL) {
1021 if ((pt->pte_hi & PTE_HID) == 0)
1022 panic("pmap_pte_spill: victim p-pte (%p) has "
1023 "no pvo entry!", pt);
1026 * If this is a secondary PTE, we need to search
1027 * its primary pvo bucket for the matching PVO.
1029 vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */
1030 TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
1031 PMAP_PVO_CHECK(pvo); /* sanity check */
1034 * We also need the pvo entry of the victim we are
1035 * replacing so save the R & C bits of the PTE.
1037 if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
1038 victim_pvo = pvo;
1039 break;
1042 if (victim_pvo == NULL)
1043 panic("pmap_pte_spill: victim s-pte (%p) has "
1044 "no pvo entry!", pt);
1048 * The victim should not be a kernel PVO/PTE entry.
1050 KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
1051 KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
1052 KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);
1055 * We are invalidating the TLB entry for the EA of the PVO
1056 * we are replacing even though it's valid; if we don't,
1057 * we lose any ref/chg bit changes contained in the TLB
1058 * entry.
1060 source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
1063 * To enforce the PVO list ordering constraint that all
1064 * evicted entries should come before all valid entries,
1065 * move the source PVO to the tail of its list and the
1066 * victim PVO to the head of its list (which might not be
1067 * the same list, if the victim was using the secondary hash).
1069 TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
1070 TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
1071 TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
1072 TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
1073 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
1074 pmap_pte_set(pt, &source_pvo->pvo_pte);
1075 victim_pvo->pvo_pmap->pm_evictions++;
1076 source_pvo->pvo_pmap->pm_evictions--;
1077 PVO_WHERE(victim_pvo, SPILL_UNSET);
1078 PVO_WHERE(source_pvo, SPILL_SET);
1080 PVO_PTEGIDX_CLR(victim_pvo);
1081 PVO_PTEGIDX_SET(source_pvo, i);
1082 PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
1083 PMAPCOUNT(ptes_spilled);
1084 PMAPCOUNT(ptes_evicted);
1085 PMAPCOUNT(ptes_removed);
1087 PMAP_PVO_CHECK(victim_pvo);
1088 PMAP_PVO_CHECK(source_pvo);
1090 PMAP_UNLOCK();
1091 return 1;
1095 * Restrict given range to physical memory
1097 void
1098 pmap_real_memory(paddr_t *start, psize_t *size)
1100 struct mem_region *mp;
1102 for (mp = mem; mp->size; mp++) {
1103 if (*start + *size > mp->start
1104 && *start < mp->start + mp->size) {
1105 if (*start < mp->start) {
1106 *size -= mp->start - *start;
1107 *start = mp->start;
1109 if (*start + *size > mp->start + mp->size)
1110 *size = mp->start + mp->size - *start;
1111 return;
1114 *size = 0;
1115 }
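/*
 * Usage sketch (illustrative, not part of the original file): callers
 * clamp a candidate range to real memory before using it; a range that
 * overlaps no mem_region comes back with *size == 0.
 */
#if 0
static bool
range_is_backed(paddr_t pa, psize_t len)
{
	pmap_real_memory(&pa, &len);
	return len != 0;
}
#endif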
1118 * Initialize anything else for pmap handling.
1119 * Called during vm_init().
1121 void
1122 pmap_init(void)
1124 pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
1125 sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
1126 &pmap_pool_mallocator, IPL_NONE);
1128 pool_setlowat(&pmap_mpvo_pool, 1008);
1130 pmap_initialized = 1;
1135 * How much virtual space does the kernel get?
1137 void
1138 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1141 * For now, reserve one segment (minus some overhead) for kernel
1142 * virtual memory
1144 *start = VM_MIN_KERNEL_ADDRESS;
1145 *end = VM_MAX_KERNEL_ADDRESS;
1149 * Allocate, initialize, and return a new physical map.
1151 pmap_t
1152 pmap_create(void)
1154 pmap_t pm;
1156 pm = pool_get(&pmap_pool, PR_WAITOK);
1157 memset((void *)pm, 0, sizeof *pm);
1158 pmap_pinit(pm);
1160 DPRINTFN(CREATE,("pmap_create: pm %p:\n"
1161 "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
1162 " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
1163 "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
1164 " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
1166 pm->pm_sr[0], pm->pm_sr[1],
1167 pm->pm_sr[2], pm->pm_sr[3],
1168 pm->pm_sr[4], pm->pm_sr[5],
1169 pm->pm_sr[6], pm->pm_sr[7],
1170 pm->pm_sr[8], pm->pm_sr[9],
1171 pm->pm_sr[10], pm->pm_sr[11],
1172 pm->pm_sr[12], pm->pm_sr[13],
1173 pm->pm_sr[14], pm->pm_sr[15]));
1174 return pm;
1178 * Initialize a preallocated and zeroed pmap structure.
1180 void
1181 pmap_pinit(pmap_t pm)
1183 register_t entropy = MFTB();
1184 register_t mask;
1185 int i;
1188 * Allocate some segment registers for this pmap.
1190 pm->pm_refs = 1;
1191 PMAP_LOCK();
1192 for (i = 0; i < NPMAPS; i += VSID_NBPW) {
1193 static register_t pmap_vsidcontext;
1194 register_t hash;
1195 unsigned int n;
1197 /* Create a new value by multiplying by a prime adding in
1198 * entropy from the timebase register. This is to make the
1199 * VSID more random so that the PT Hash function collides
1200 * less often. (note that the prime causes gcc to do shifts
1201 * instead of a multiply)
1203 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
1204 hash = pmap_vsidcontext & (NPMAPS - 1);
1205 if (hash == 0) { /* 0 is special, avoid it */
1206 entropy += 0xbadf00d;
1207 continue;
1209 n = hash >> 5;
1210 mask = 1L << (hash & (VSID_NBPW-1));
1211 hash = pmap_vsidcontext;
1212 if (pmap_vsid_bitmap[n] & mask) { /* collision? */
1213 /* anything free in this bucket? */
1214 if (~pmap_vsid_bitmap[n] == 0) {
1215 entropy = hash ^ (hash >> 16);
1216 continue;
1218 i = ffs(~pmap_vsid_bitmap[n]) - 1;
1219 mask = 1L << i;
1220 hash &= ~(VSID_NBPW-1);
1221 hash |= i;
1223 hash &= PTE_VSID >> PTE_VSID_SHFT;
1224 pmap_vsid_bitmap[n] |= mask;
1225 pm->pm_vsid = hash;
1226 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1227 for (i = 0; i < 16; i++)
1228 pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
1229 SR_NOEXEC;
1230 #endif
1231 PMAP_UNLOCK();
1232 return;
1234 PMAP_UNLOCK();
1235 panic("pmap_pinit: out of segments");
1236 }
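/*
 * The VSID allocator above is a hashed search of pmap_vsid_bitmap[]:
 * one bit per context, NPMAPS contexts total.  pmap_release() below
 * clears the bit again; the shared indexing, as a sketch (illustrative
 * only):
 */
#if 0
static bool
vsid_is_allocated(unsigned int hash)
{
	unsigned int idx = hash & (NPMAPS - 1);
	return (pmap_vsid_bitmap[idx / VSID_NBPW] &
	    (1UL << (idx & (VSID_NBPW - 1)))) != 0;
}
#endif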
1239 * Add a reference to the given pmap.
1241 void
1242 pmap_reference(pmap_t pm)
1244 atomic_inc_uint(&pm->pm_refs);
1248 * Retire the given pmap from service.
1249 * Should only be called if the map contains no valid mappings.
1251 void
1252 pmap_destroy(pmap_t pm)
1254 if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
1255 pmap_release(pm);
1256 pool_put(&pmap_pool, pm);
1261 * Release any resources held by the given physical map.
1262 * Called when a pmap initialized by pmap_pinit is being released.
1264 void
1265 pmap_release(pmap_t pm)
1267 int idx, mask;
1269 KASSERT(pm->pm_stats.resident_count == 0);
1270 KASSERT(pm->pm_stats.wired_count == 0);
1272 PMAP_LOCK();
1273 if (pm->pm_sr[0] == 0)
1274 panic("pmap_release");
1275 idx = pm->pm_vsid & (NPMAPS-1);
1276 mask = 1 << (idx % VSID_NBPW);
1277 idx /= VSID_NBPW;
1279 KASSERT(pmap_vsid_bitmap[idx] & mask);
1280 pmap_vsid_bitmap[idx] &= ~mask;
1281 PMAP_UNLOCK();
1285 * Copy the range specified by src_addr/len
1286 * from the source map to the range dst_addr/len
1287 * in the destination map.
1289 * This routine is only advisory and need not do anything.
1291 void
1292 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
1293 vsize_t len, vaddr_t src_addr)
1295 PMAPCOUNT(copies);
1299 * Require that all active physical maps contain no
1300 * incorrect entries NOW.
1302 void
1303 pmap_update(struct pmap *pmap)
1305 PMAPCOUNT(updates);
1306 TLBSYNC();
1309 static inline int
1310 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
1312 int pteidx;
1314 * We can find the actual pte entry without searching by
1315 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
1316 * and by noticing the HID bit.
1318 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
1319 if (pvo->pvo_pte.pte_hi & PTE_HID)
1320 pteidx ^= pmap_pteg_mask * 8;
1321 return pteidx;
1322 }
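/*
 * Example (illustrative): a PVO resident in slot 3 of primary PTEG 0x12
 * (PTE_HID clear) yields pteidx 0x12 * 8 + 3 == 0x93; pmap_pvo_to_pte()
 * below inverts this with pteidx >> 3 and pteidx & 7.
 */
#if 0
static void
pvo_pte_index_example(void)
{
	int pteidx = 0x12 * 8 + 3;
	KASSERT((pteidx >> 3) == 0x12);
	KASSERT((pteidx & 7) == 3);
}
#endif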
1324 volatile struct pte *
1325 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
1327 volatile struct pte *pt;
1329 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1330 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
1331 return NULL;
1332 #endif
1335 * If we haven't been supplied the ptegidx, calculate it.
1337 if (pteidx == -1) {
1338 int ptegidx;
1339 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1340 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1343 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
1345 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
1346 return pt;
1347 #else
1348 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
1349 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1350 "pvo but no valid pte index", pvo);
1352 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
1353 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
1354 "pvo but no valid pte", pvo);
1357 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
1358 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
1359 #if defined(DEBUG) || defined(PMAPCHECK)
1360 pmap_pte_print(pt);
1361 #endif
1362 panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
1363 "pmap_pteg_table %p but invalid in pvo",
1364 pvo, pt);
1366 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
1367 #if defined(DEBUG) || defined(PMAPCHECK)
1368 pmap_pte_print(pt);
1369 #endif
1370 panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
1371 "not match pte %p in pmap_pteg_table",
1372 pvo, pt);
1374 return pt;
1377 if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1378 #if defined(DEBUG) || defined(PMAPCHECK)
1379 pmap_pte_print(pt);
1380 #endif
1381 panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
1382 "pmap_pteg_table but valid in pvo", pvo, pt);
1384 return NULL;
1385 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
1388 struct pvo_entry *
1389 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
1391 struct pvo_entry *pvo;
1392 int ptegidx;
1394 va &= ~ADDR_POFF;
1395 ptegidx = va_to_pteg(pm, va);
1397 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1398 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1399 if ((uintptr_t) pvo >= SEGMENT_LENGTH)
1400 panic("pmap_pvo_find_va: invalid pvo %p on "
1401 "list %#x (%p)", pvo, ptegidx,
1402 &pmap_pvo_table[ptegidx]);
1403 #endif
1404 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1405 if (pteidx_p)
1406 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
1407 return pvo;
1410 if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
1411 panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
1412 __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
1413 return NULL;
1416 #if defined(DEBUG) || defined(PMAPCHECK)
1417 void
1418 pmap_pvo_check(const struct pvo_entry *pvo)
1420 struct pvo_head *pvo_head;
1421 struct pvo_entry *pvo0;
1422 volatile struct pte *pt;
1423 int failed = 0;
1425 PMAP_LOCK();
1427 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
1428 panic("pmap_pvo_check: pvo %p: invalid address", pvo);
1430 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
1431 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
1432 pvo, pvo->pvo_pmap);
1433 failed = 1;
1436 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
1437 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
1438 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1439 pvo, TAILQ_NEXT(pvo, pvo_olink));
1440 failed = 1;
1443 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
1444 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
1445 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
1446 pvo, LIST_NEXT(pvo, pvo_vlink));
1447 failed = 1;
1450 if (PVO_MANAGED_P(pvo)) {
1451 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1452 } else {
1453 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
1454 printf("pmap_pvo_check: pvo %p: non kernel address "
1455 "on kernel unmanaged list\n", pvo);
1456 failed = 1;
1458 pvo_head = &pmap_pvo_kunmanaged;
1460 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
1461 if (pvo0 == pvo)
1462 break;
1464 if (pvo0 == NULL) {
1465 printf("pmap_pvo_check: pvo %p: not present "
1466 "on its vlist head %p\n", pvo, pvo_head);
1467 failed = 1;
1469 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
1470 printf("pmap_pvo_check: pvo %p: not present "
1471 "on its olist head\n", pvo);
1472 failed = 1;
1474 pt = pmap_pvo_to_pte(pvo, -1);
1475 if (pt == NULL) {
1476 if (pvo->pvo_pte.pte_hi & PTE_VALID) {
1477 printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
1478 "no PTE\n", pvo);
1479 failed = 1;
1481 } else {
1482 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
1483 (uintptr_t) pt >=
1484 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
1485 printf("pmap_pvo_check: pvo %p: pte %p not in "
1486 "pteg table\n", pvo, pt);
1487 failed = 1;
1489 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
1490 printf("pmap_pvo_check: pvo %p: pteg slot does not "
1491 "match PVO_PTEGIDX\n", pvo);
1492 failed = 1;
1494 if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
1495 printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1496 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
1497 pvo->pvo_pte.pte_hi,
1498 pt->pte_hi);
1499 failed = 1;
1501 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
1502 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
1503 printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1504 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
1505 (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
1506 (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
1507 failed = 1;
1509 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1510 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva ""
1511 " doesn't not match PVO's VA %#" _PRIxva "\n",
1512 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
1513 failed = 1;
1515 if (failed)
1516 pmap_pte_print(pt);
1518 if (failed)
1519 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
1520 pvo->pvo_pmap);
1522 PMAP_UNLOCK();
1524 #endif /* DEBUG || PMAPCHECK */
1527 * Search the PVO table looking for a non-wired entry.
1528 * If we find one, remove it and return it.
1531 struct pvo_entry *
1532 pmap_pvo_reclaim(struct pmap *pm)
1534 struct pvo_tqhead *pvoh;
1535 struct pvo_entry *pvo;
1536 uint32_t idx, endidx;
1538 endidx = pmap_pvo_reclaim_nextidx;
1539 for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
1540 idx = (idx + 1) & pmap_pteg_mask) {
1541 pvoh = &pmap_pvo_table[idx];
1542 TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
1543 if (!PVO_WIRED_P(pvo)) {
1544 pmap_pvo_remove(pvo, -1, NULL);
1545 pmap_pvo_reclaim_nextidx = idx;
1546 PMAPCOUNT(pvos_reclaimed);
1547 return pvo;
1551 return NULL;
1555 * This returns whether this is the first mapping of a page.
1558 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1559 vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1561 struct pvo_entry *pvo;
1562 struct pvo_tqhead *pvoh;
1563 register_t msr;
1564 int ptegidx;
1565 int i;
1566 int poolflags = PR_NOWAIT;
1569 * Compute the PTE Group index.
1571 va &= ~ADDR_POFF;
1572 ptegidx = va_to_pteg(pm, va);
1574 msr = pmap_interrupts_off();
1576 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1577 if (pmap_pvo_remove_depth > 0)
1578 panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
1579 if (++pmap_pvo_enter_depth > 1)
1580 panic("pmap_pvo_enter: called recursively!");
1581 #endif
1584 * Remove any existing mapping for this page. Reuse the
1585 * pvo entry if there is a mapping.
1587 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
1588 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
1589 #ifdef DEBUG
1590 if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
1591 ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
1592 ~(PTE_REF|PTE_CHG)) == 0 &&
1593 va < VM_MIN_KERNEL_ADDRESS) {
1594 printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
1595 pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
1596 printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
1597 pvo->pvo_pte.pte_hi,
1598 pm->pm_sr[va >> ADDR_SR_SHFT]);
1599 pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
1600 #ifdef DDBX
1601 Debugger();
1602 #endif
1604 #endif
1605 PMAPCOUNT(mappings_replaced);
1606 pmap_pvo_remove(pvo, -1, NULL);
1607 break;
1612 * If we aren't overwriting a mapping, try to allocate.
1614 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1615 --pmap_pvo_enter_depth;
1616 #endif
1617 pmap_interrupts_restore(msr);
1618 if (pvo) {
1619 pmap_pvo_free(pvo);
1621 pvo = pool_get(pl, poolflags);
1623 #ifdef DEBUG
1625 * Exercise pmap_pvo_reclaim() a little.
1627 if (pvo && (flags & PMAP_CANFAIL) != 0 &&
1628 pmap_pvo_reclaim_debugctr++ > 0x1000 &&
1629 (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
1630 pool_put(pl, pvo);
1631 pvo = NULL;
1633 #endif
1635 msr = pmap_interrupts_off();
1636 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1637 ++pmap_pvo_enter_depth;
1638 #endif
1639 if (pvo == NULL) {
1640 pvo = pmap_pvo_reclaim(pm);
1641 if (pvo == NULL) {
1642 if ((flags & PMAP_CANFAIL) == 0)
1643 panic("pmap_pvo_enter: failed");
1644 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1645 pmap_pvo_enter_depth--;
1646 #endif
1647 PMAPCOUNT(pvos_failed);
1648 pmap_interrupts_restore(msr);
1649 return ENOMEM;
1653 pvo->pvo_vaddr = va;
1654 pvo->pvo_pmap = pm;
1655 pvo->pvo_vaddr &= ~ADDR_POFF;
1656 if (flags & VM_PROT_EXECUTE) {
1657 PMAPCOUNT(exec_mappings);
1658 pvo_set_exec(pvo);
1660 if (flags & PMAP_WIRED)
1661 pvo->pvo_vaddr |= PVO_WIRED;
1662 if (pvo_head != &pmap_pvo_kunmanaged) {
1663 pvo->pvo_vaddr |= PVO_MANAGED;
1664 PMAPCOUNT(mappings);
1665 } else {
1666 PMAPCOUNT(kernel_mappings);
1668 pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1670 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1671 if (PVO_WIRED_P(pvo))
1672 pvo->pvo_pmap->pm_stats.wired_count++;
1673 pvo->pvo_pmap->pm_stats.resident_count++;
1674 #if defined(DEBUG)
1675 /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
1676 DPRINTFN(PVOENTER,
1677 ("pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
1678 pvo, pm, va, pa));
1679 #endif
1682 * We hope this succeeds but it isn't required.
1684 pvoh = &pmap_pvo_table[ptegidx];
1685 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
1686 if (i >= 0) {
1687 PVO_PTEGIDX_SET(pvo, i);
1688 PVO_WHERE(pvo, ENTER_INSERT);
1689 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
1690 ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
1691 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1693 } else {
1695 * Since we didn't have room for this entry (which makes it
1696 * an evicted entry), place it at the head of the list.
1698 TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
1699 PMAPCOUNT(ptes_evicted);
1700 pm->pm_evictions++;
1702 * If this is a kernel page, make sure it's active.
1704 if (pm == pmap_kernel()) {
1705 i = pmap_pte_spill(pm, va, false);
1706 KASSERT(i);
1709 PMAP_PVO_CHECK(pvo); /* sanity check */
1710 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1711 pmap_pvo_enter_depth--;
1712 #endif
1713 pmap_interrupts_restore(msr);
1714 return 0;
1717 static void
1718 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
1720 volatile struct pte *pt;
1721 int ptegidx;
1723 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1724 if (++pmap_pvo_remove_depth > 1)
1725 panic("pmap_pvo_remove: called recursively!");
1726 #endif
1729 * If we haven't been supplied the ptegidx, calculate it.
1731 if (pteidx == -1) {
1732 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1733 pteidx = pmap_pvo_pte_index(pvo, ptegidx);
1734 } else {
1735 ptegidx = pteidx >> 3;
1736 if (pvo->pvo_pte.pte_hi & PTE_HID)
1737 ptegidx ^= pmap_pteg_mask;
1739 PMAP_PVO_CHECK(pvo); /* sanity check */
1742 * If there is an active pte entry, we need to deactivate it
1743 * (and save the ref & chg bits).
1745 pt = pmap_pvo_to_pte(pvo, pteidx);
1746 if (pt != NULL) {
1747 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1748 PVO_WHERE(pvo, REMOVE);
1749 PVO_PTEGIDX_CLR(pvo);
1750 PMAPCOUNT(ptes_removed);
1751 } else {
1752 KASSERT(pvo->pvo_pmap->pm_evictions > 0);
1753 pvo->pvo_pmap->pm_evictions--;
1757 * Account for executable mappings.
1759 if (PVO_EXECUTABLE_P(pvo))
1760 pvo_clear_exec(pvo);
1763 * Update our statistics.
1765 pvo->pvo_pmap->pm_stats.resident_count--;
1766 if (PVO_WIRED_P(pvo))
1767 pvo->pvo_pmap->pm_stats.wired_count--;
1770 * Save the REF/CHG bits into their cache if the page is managed.
1772 if (PVO_MANAGED_P(pvo)) {
1773 register_t ptelo = pvo->pvo_pte.pte_lo;
1774 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
1776 if (pg != NULL) {
1778 * If this page was changed and it is mapped exec,
1779 * invalidate it.
1781 if ((ptelo & PTE_CHG) &&
1782 (pmap_attr_fetch(pg) & PTE_EXEC)) {
1783 struct pvo_head *pvoh = vm_page_to_pvoh(pg);
1784 if (LIST_EMPTY(pvoh)) {
1785 DPRINTFN(EXEC, ("[pmap_pvo_remove: "
1786 "%#" _PRIxpa ": clear-exec]\n",
1787 VM_PAGE_TO_PHYS(pg)));
1788 pmap_attr_clear(pg, PTE_EXEC);
1789 PMAPCOUNT(exec_uncached_pvo_remove);
1790 } else {
1791 DPRINTFN(EXEC, ("[pmap_pvo_remove: "
1792 "%#" _PRIxpa ": syncicache]\n",
1793 VM_PAGE_TO_PHYS(pg)));
1794 pmap_syncicache(VM_PAGE_TO_PHYS(pg),
1795 PAGE_SIZE);
1796 PMAPCOUNT(exec_synced_pvo_remove);
1800 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
1802 PMAPCOUNT(unmappings);
1803 } else {
1804 PMAPCOUNT(kernel_unmappings);
1808 * Remove the PVO from its lists and return it to the pool.
1810 LIST_REMOVE(pvo, pvo_vlink);
1811 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1812 if (pvol) {
1813 LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
1815 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1816 pmap_pvo_remove_depth--;
1817 #endif
1820 void
1821 pmap_pvo_free(struct pvo_entry *pvo)
1824 pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
1827 void
1828 pmap_pvo_free_list(struct pvo_head *pvol)
1830 struct pvo_entry *pvo, *npvo;
1832 for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
1833 npvo = LIST_NEXT(pvo, pvo_vlink);
1834 LIST_REMOVE(pvo, pvo_vlink);
1835 pmap_pvo_free(pvo);
1840 * Mark a mapping as executable.
1841 * If this is the first executable mapping in the segment,
1842 * clear the noexec flag.
1844 static void
1845 pvo_set_exec(struct pvo_entry *pvo)
1847 struct pmap *pm = pvo->pvo_pmap;
1849 if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
1850 return;
1852 pvo->pvo_vaddr |= PVO_EXECUTABLE;
1853 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1854 {
1855 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1856 if (pm->pm_exec[sr]++ == 0) {
1857 pm->pm_sr[sr] &= ~SR_NOEXEC;
1858 }
1859 }
1860 #endif
1861 }
1864 * Mark a mapping as non-executable.
1865 * If this was the last executable mapping in the segment,
1866 * set the noexec flag.
1868 static void
1869 pvo_clear_exec(struct pvo_entry *pvo)
1871 struct pmap *pm = pvo->pvo_pmap;
1873 if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
1874 return;
1876 pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1877 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1878 {
1879 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
1880 if (--pm->pm_exec[sr] == 0) {
1881 pm->pm_sr[sr] |= SR_NOEXEC;
1882 }
1883 }
1884 #endif
1885 }
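/*
 * Taken together, pvo_set_exec() and pvo_clear_exec() keep a
 * per-segment refcount so SR_NOEXEC is set exactly while a segment has
 * no executable mappings.  The invariant, as a sketch (illustrative
 * only; assumes the OEA/OEA64_BRIDGE segment-register layout):
 */
#if 0
static bool
segment_noexec_consistent(const struct pmap *pm, int sr)
{
	return (pm->pm_exec[sr] == 0) == ((pm->pm_sr[sr] & SR_NOEXEC) != 0);
}
#endif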
1888 * Insert physical page at pa into the given pmap at virtual address va.
1891 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1893 struct mem_region *mp;
1894 struct pvo_head *pvo_head;
1895 struct vm_page *pg;
1896 struct pool *pl;
1897 register_t pte_lo;
1898 int error;
1899 u_int pvo_flags;
1900 u_int was_exec = 0;
1902 PMAP_LOCK();
1904 if (__predict_false(!pmap_initialized)) {
1905 pvo_head = &pmap_pvo_kunmanaged;
1906 pl = &pmap_upvo_pool;
1907 pvo_flags = 0;
1908 pg = NULL;
1909 was_exec = PTE_EXEC;
1910 } else {
1911 pvo_head = pa_to_pvoh(pa, &pg);
1912 pl = &pmap_mpvo_pool;
1913 pvo_flags = PVO_MANAGED;
1916 DPRINTFN(ENTER,
1917 ("pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
1918 pm, va, pa, prot, flags));
1921 * If this is a managed page, and it's the first reference to the
1922 * page, clear the execness of the page. Otherwise fetch the execness.
1924 if (pg != NULL)
1925 was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
1927 DPRINTFN(ENTER, (" was_exec=%d", was_exec));
1930 * Assume the page is cache inhibited and access is guarded unless
1931 * it's in our available memory array. If it is in the memory array,
1932 * assume it's in memory coherent memory.
1934 pte_lo = PTE_IG;
1935 if ((flags & PMAP_NC) == 0) {
1936 for (mp = mem; mp->size; mp++) {
1937 if (pa >= mp->start && pa < mp->start + mp->size) {
1938 pte_lo = PTE_M;
1939 break;
1944 if (prot & VM_PROT_WRITE)
1945 pte_lo |= PTE_BW;
1946 else
1947 pte_lo |= PTE_BR;
1950 * If this was in response to a fault, "pre-fault" the PTE's
1951 * changed/referenced bit appropriately.
1953 if (flags & VM_PROT_WRITE)
1954 pte_lo |= PTE_CHG;
1955 if (flags & VM_PROT_ALL)
1956 pte_lo |= PTE_REF;
1959 * We need to know if this page can be executable
1961 flags |= (prot & VM_PROT_EXECUTE);
1964 * Record mapping for later back-translation and pte spilling.
1965 * This will overwrite any existing mapping.
1967 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
1970 * Flush the real page from the instruction cache if this page is
1971 * mapped executable and cacheable and has not been flushed since
1972 * the last time it was modified.
1974 if (error == 0 &&
1975 (flags & VM_PROT_EXECUTE) &&
1976 (pte_lo & PTE_I) == 0 &&
1977 was_exec == 0) {
1978 DPRINTFN(ENTER, (" syncicache"));
1979 PMAPCOUNT(exec_synced);
1980 pmap_syncicache(pa, PAGE_SIZE);
1981 if (pg != NULL) {
1982 pmap_attr_save(pg, PTE_EXEC);
1983 PMAPCOUNT(exec_cached);
1984 #if defined(DEBUG) || defined(PMAPDEBUG)
1985 if (pmapdebug & PMAPDEBUG_ENTER)
1986 printf(" marked-as-exec");
1987 else if (pmapdebug & PMAPDEBUG_EXEC)
1988 printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n",
1989 VM_PAGE_TO_PHYS(pg));
1991 #endif
1995 DPRINTFN(ENTER, (": error=%d\n", error));
1997 PMAP_UNLOCK();
1999 return error;
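/*
 * Editor's sketch (not part of the original source): a typical caller of
 * pmap_enter() passes the faulting access type in "flags" so the REF/CHG
 * bits are pre-set, and checks the return value.  The helper name and its
 * wired, read/write policy are illustrative assumptions only.
 */
#if 0
static int
example_enter_wired(pmap_t pm, vaddr_t va, paddr_t pa)
{
	return pmap_enter(pm, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_WRITE | PMAP_WIRED);
}
#endif
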
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	struct mem_region *mp;
	register_t pte_lo;
	int error;

#if defined (PMAP_OEA64_BRIDGE)
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter_pa: attempt to enter "
		    "non-kernel address %#" _PRIxva "!", va);
#endif

	DPRINTFN(KENTER,
	    ("pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot));

	PMAP_LOCK();

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.  If it is in the memory array,
	 * assume it's in memory coherent memory.
	 */
	pte_lo = PTE_IG;
	if ((prot & PMAP_NC) == 0) {
		for (mp = mem; mp->size; mp++) {
			if (pa >= mp->start && pa < mp->start + mp->size) {
				pte_lo = PTE_M;
				break;
			}
		}
	}

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	/*
	 * We don't care about REF/CHG on PVOs on the unmanaged list.
	 */
	error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);

	if (error != 0)
		panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
		    va, pa, error);

	PMAP_UNLOCK();
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kremove: attempt to remove "
		    "non-kernel address %#" _PRIxva "!", va);

	DPRINTFN(KREMOVE, ("pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len));
	pmap_remove(pmap_kernel(), va, va + len);
}

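/*
 * Editor's sketch (not part of the original source): pmap_kenter_pa()
 * and pmap_kremove() are used in pairs for unmanaged, kernel-only
 * mappings.  The scratch-window helper below is a hypothetical example.
 */
#if 0
static void
example_kernel_window(vaddr_t scratch_va, paddr_t pa)
{
	pmap_kenter_pa(scratch_va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
	/* ... access the frame through scratch_va ... */
	pmap_kremove(scratch_va, PAGE_SIZE);
}
#endif
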
/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
{
	struct pvo_head pvol;
	struct pvo_entry *pvo;
	register_t msr;
	int pteidx;

	PMAP_LOCK();
	LIST_INIT(&pvol);
	msr = pmap_interrupts_off();
	for (; va < endva; va += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, va, &pteidx);
		if (pvo != NULL) {
			pmap_pvo_remove(pvo, pteidx, &pvol);
		}
	}
	pmap_interrupts_restore(msr);
	pmap_pvo_free_list(&pvol);
	PMAP_UNLOCK();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
bool
pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
{
	struct pvo_entry *pvo;
	register_t msr;

	PMAP_LOCK();

	/*
	 * If this is a kernel pmap lookup, also check the battable
	 * and if we get a hit, translate the VA to a PA using the
	 * BAT entries.  Don't check against VM_MAX_KERNEL_ADDRESS
	 * when KERNEL2_SR is 15, as it will wrap back to 0.
	 */
	if (pm == pmap_kernel() &&
	    (va < VM_MIN_KERNEL_ADDRESS ||
	     (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
		KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
#if defined (PMAP_OEA)
#ifdef PPC_OEA601
		if ((MFPVR() >> 16) == MPC601) {
			register_t batu = battable[va >> 23].batu;
			register_t batl = battable[va >> 23].batl;
			register_t sr = iosrtable[va >> ADDR_SR_SHFT];
			if (BAT601_VALID_P(batl) &&
			    BAT601_VA_MATCH_P(batu, batl, va)) {
				register_t mask =
				    (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
				if (pap)
					*pap = (batl & mask) | (va & ~mask);
				PMAP_UNLOCK();
				return true;
			} else if (SR601_VALID_P(sr) &&
			    SR601_PA_MATCH_P(sr, va)) {
				if (pap)
					*pap = va;
				PMAP_UNLOCK();
				return true;
			}
		} else
#endif /* PPC_OEA601 */
		{
			register_t batu = battable[va >> ADDR_SR_SHFT].batu;
			if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
				register_t batl =
				    battable[va >> ADDR_SR_SHFT].batl;
				register_t mask =
				    (~(batu & BAT_BL) << 15) & ~0x1ffffL;
				if (pap)
					*pap = (batl & mask) | (va & ~mask);
				PMAP_UNLOCK();
				return true;
			}
		}
		PMAP_UNLOCK();
		return false;
#elif defined (PMAP_OEA64_BRIDGE)
		if (va >= SEGMENT_LENGTH)
			panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n",
			    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
		else {
			if (pap)
				*pap = va;
			PMAP_UNLOCK();
			return true;
		}
#elif defined (PMAP_OEA64)
#error PPC_OEA64 not supported
#endif /* PPC_OEA */
	}

	msr = pmap_interrupts_off();
	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo != NULL) {
		PMAP_PVO_CHECK(pvo);		/* sanity check */
		if (pap)
			*pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
			    | (va & ADDR_POFF);
	}
	pmap_interrupts_restore(msr);
	PMAP_UNLOCK();
	return pvo != NULL;
}

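/*
 * Editor's sketch (not part of the original source): pmap_extract() is
 * the standard way to translate a mapped VA back to a PA; the return
 * value must be checked since the VA may simply not be mapped.
 */
#if 0
static paddr_t
example_kva_to_pa(vaddr_t va)
{
	paddr_t pa;

	if (!pmap_extract(pmap_kernel(), va, &pa))
		panic("example_kva_to_pa: %#" _PRIxva " is not mapped", va);
	return pa;
}
#endif
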
/*
 * Lower the protection on the specified range of this pmap.
 */
void
pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
{
	struct pvo_entry *pvo;
	volatile struct pte *pt;
	register_t msr;
	int pteidx;

	/*
	 * Since this routine only downgrades protection, we should
	 * always be called with at least one bit not set.
	 */
	KASSERT(prot != VM_PROT_ALL);

	/*
	 * If there is no protection, this is equivalent to
	 * removing the range from the pmap.
	 */
	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pm, va, endva);
		return;
	}

	PMAP_LOCK();

	msr = pmap_interrupts_off();
	for (; va < endva; va += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, va, &pteidx);
		if (pvo == NULL)
			continue;
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * Revoke executable if asked to do so.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo_clear_exec(pvo);

#if 0
		/*
		 * If the page is already read-only, no change
		 * needs to be made.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
			continue;
#endif
		/*
		 * Grab the PTE pointer before we diddle with
		 * the cached PTE copy.
		 */
		pt = pmap_pvo_to_pte(pvo, pteidx);

		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update
		 * that pte as well.
		 */
		if (pt != NULL) {
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
			PVO_WHERE(pvo, PMAP_PROTECT);
			PMAPCOUNT(ptes_changed);
		}

		PMAP_PVO_CHECK(pvo);		/* sanity check */
	}
	pmap_interrupts_restore(msr);
	PMAP_UNLOCK();
}

void
pmap_unwire(pmap_t pm, vaddr_t va)
{
	struct pvo_entry *pvo;
	register_t msr;

	PMAP_LOCK();
	msr = pmap_interrupts_off();
	pvo = pmap_pvo_find_va(pm, va, NULL);
	if (pvo != NULL) {
		if (PVO_WIRED_P(pvo)) {
			pvo->pvo_vaddr &= ~PVO_WIRED;
			pm->pm_stats.wired_count--;
		}
		PMAP_PVO_CHECK(pvo);		/* sanity check */
	}
	pmap_interrupts_restore(msr);
	PMAP_UNLOCK();
}

/*
 * Lower the protection on the specified physical page.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct pvo_head *pvo_head, pvol;
	struct pvo_entry *pvo, *next_pvo;
	volatile struct pte *pt;
	register_t msr;

	PMAP_LOCK();

	KASSERT(prot != VM_PROT_ALL);
	LIST_INIT(&pvol);
	msr = pmap_interrupts_off();

	/*
	 * When UVM reuses a page, it does a pmap_page_protect with
	 * VM_PROT_NONE.  At that point, we can clear the exec flag
	 * since we know the page will have different contents.
	 */
	if ((prot & VM_PROT_READ) == 0) {
		DPRINTFN(EXEC, ("[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
		    VM_PAGE_TO_PHYS(pg)));
		if (pmap_attr_fetch(pg) & PTE_EXEC) {
			PMAPCOUNT(exec_uncached_page_protect);
			pmap_attr_clear(pg, PTE_EXEC);
		}
	}

	pvo_head = vm_page_to_pvoh(pg);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * Downgrading to no mapping at all, we just remove the entry.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			pmap_pvo_remove(pvo, -1, &pvol);
			continue;
		}

		/*
		 * If EXEC permission is being revoked, just clear the
		 * flag in the PVO.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo_clear_exec(pvo);

		/*
		 * If this entry is already RO, don't diddle with the
		 * page table.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
			PMAP_PVO_CHECK(pvo);
			continue;
		}

		/*
		 * Grab the PTE before we diddle the bits so
		 * pvo_to_pte can verify the pte contents are as
		 * expected.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL) {
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
			PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
			PMAPCOUNT(ptes_changed);
		}
		PMAP_PVO_CHECK(pvo);		/* sanity check */
	}
	pmap_interrupts_restore(msr);
	pmap_pvo_free_list(&pvol);

	PMAP_UNLOCK();
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct lwp *l)
{
	struct pcb *pcb = lwp_getpcb(l);
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	DPRINTFN(ACTIVATE,
	    ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	pcb->pcb_pm = pmap;

	/*
	 * In theory, the SR registers need only be valid on return
	 * to user space, so we wait to load them there.
	 */
	if (l == curlwp) {
		/* Store pointer to new current pmap. */
		curpm = pmap;
	}
}

/*
 * Deactivate the specified process's address space.
 */
void
pmap_deactivate(struct lwp *l)
{
}

bool
pmap_query_bit(struct vm_page *pg, int ptebit)
{
	struct pvo_entry *pvo;
	volatile struct pte *pt;
	register_t msr;

	PMAP_LOCK();

	if (pmap_attr_fetch(pg) & ptebit) {
		PMAP_UNLOCK();
		return true;
	}

	msr = pmap_interrupts_off();
	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);		/* sanity check */
		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte_lo & ptebit) {
			pmap_attr_save(pg, ptebit);
			PMAP_PVO_CHECK(pvo);	/* sanity check */
			pmap_interrupts_restore(msr);
			PMAP_UNLOCK();
			return true;
		}
	}
	/*
	 * No luck, now go thru the hard part of looking at the ptes
	 * themselves.  Sync so any pending REF/CHG bits are flushed
	 * to the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);		/* sanity check */
		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				pmap_attr_save(pg, ptebit);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				pmap_interrupts_restore(msr);
				PMAP_UNLOCK();
				return true;
			}
		}
	}
	pmap_interrupts_restore(msr);
	PMAP_UNLOCK();
	return false;
}

bool
pmap_clear_bit(struct vm_page *pg, int ptebit)
{
	struct pvo_head *pvoh = vm_page_to_pvoh(pg);
	struct pvo_entry *pvo;
	volatile struct pte *pt;
	register_t msr;
	int rv = 0;

	PMAP_LOCK();
	msr = pmap_interrupts_off();

	/*
	 * Fetch the cached value.
	 */
	rv |= pmap_attr_fetch(pg);

	/*
	 * Clear the cached value.
	 */
	pmap_attr_clear(pg, ptebit);

	/*
	 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
	 * can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the
	 * page table, we don't have to worry about further accesses setting
	 * the REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid PTE, also clear the ptebit from the valid PTE.
	 */
	LIST_FOREACH(pvo, pvoh, pvo_vlink) {
		PMAP_PVO_CHECK(pvo);		/* sanity check */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			/*
			 * Only sync the PTE if the bit we are looking
			 * for is not already set.
			 */
			if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
				pmap_pte_synch(pt, &pvo->pvo_pte);
			/*
			 * If the bit we are looking for was already set,
			 * clear that bit in the pte.
			 */
			if (pvo->pvo_pte.pte_lo & ptebit)
				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
		}
		rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
		pvo->pvo_pte.pte_lo &= ~ptebit;
		PMAP_PVO_CHECK(pvo);		/* sanity check */
	}
	pmap_interrupts_restore(msr);

	/*
	 * If we are clearing the modify bit and this page was marked EXEC
	 * and the user of the page thinks the page was modified, then we
	 * need to clean it from the icache if it's mapped or clear the EXEC
	 * bit if it's not mapped.  The page itself might not have the CHG
	 * bit set if the modification was done via DMA to the page.
	 */
	if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
		if (LIST_EMPTY(pvoh)) {
			DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
			    VM_PAGE_TO_PHYS(pg)));
			pmap_attr_clear(pg, PTE_EXEC);
			PMAPCOUNT(exec_uncached_clear_modify);
		} else {
			DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
			    VM_PAGE_TO_PHYS(pg)));
			pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
			PMAPCOUNT(exec_synced_clear_modify);
		}
	}
	PMAP_UNLOCK();
	return (rv & ptebit) != 0;
}

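/*
 * Editor's note (not part of the original source): the machine-
 * independent reference/modify interface is typically layered over
 * pmap_query_bit()/pmap_clear_bit() by macros in the port's pmap.h,
 * along these (assumed) lines:
 */
#if 0
#define	pmap_is_modified(pg)		pmap_query_bit((pg), PTE_CHG)
#define	pmap_is_referenced(pg)		pmap_query_bit((pg), PTE_REF)
#define	pmap_clear_modify(pg)		pmap_clear_bit((pg), PTE_CHG)
#define	pmap_clear_reference(pg)	pmap_clear_bit((pg), PTE_REF)
#endif
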
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pvo_entry *pvo;
	size_t offset = va & ADDR_POFF;
	int s;

	PMAP_LOCK();
	s = splvm();
	while (len > 0) {
		size_t seglen = PAGE_SIZE - offset;
		if (seglen > len)
			seglen = len;
		pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
		if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
			pmap_syncicache(
			    (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
			PMAP_PVO_CHECK(pvo);
		}
		va += seglen;
		len -= seglen;
		offset = 0;
	}
	splx(s);
	PMAP_UNLOCK();
}

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void
pmap_pte_print(volatile struct pte *pt)
{
	printf("PTE %p: ", pt);

#if defined(PMAP_OEA)
	/* High word: */
	printf("%#" _PRIxpte ": [", pt->pte_hi);
#else
	printf("%#" _PRIxpte ": [", pt->pte_hi);
#endif /* PMAP_OEA */

	printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
	printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');

	printf("%#" _PRIxpte " %#" _PRIxpte "",
	    (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
	    pt->pte_hi & PTE_API);
#if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
#else
	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
#endif /* PMAP_OEA */

	/* Low word: */
#if defined (PMAP_OEA)
	printf(" %#" _PRIxpte ": [", pt->pte_lo);
	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
#else
	printf(" %#" _PRIxpte ": [", pt->pte_lo);
	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
#endif
	printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
	printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
	printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
	printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
	printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
	printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
	switch (pt->pte_lo & PTE_PP) {
	case PTE_BR: printf("br]\n"); break;
	case PTE_BW: printf("bw]\n"); break;
	case PTE_SO: printf("so]\n"); break;
	case PTE_SW: printf("sw]\n"); break;
	}
}
#endif

#if defined(DDB)
void
pmap_pteg_check(void)
{
	volatile struct pte *pt;
	int i;
	int ptegidx;
	u_int p_valid = 0;
	u_int s_valid = 0;
	u_int invalid = 0;

	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
		for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
			if (pt->pte_hi & PTE_VALID) {
				if (pt->pte_hi & PTE_HID)
					s_valid++;
				else
					p_valid++;
			} else
				invalid++;
		}
	}

	printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
	    p_valid, p_valid, s_valid, s_valid,
	    invalid, invalid);
}

void
pmap_print_mmuregs(void)
{
	int i;
	u_int cpuvers;
#ifndef PMAP_OEA64
	vaddr_t addr;
	register_t soft_sr[16];
#endif
#if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
	struct bat soft_ibat[4];
	struct bat soft_dbat[4];
#endif
	paddr_t sdr1;

	cpuvers = MFPVR() >> 16;
	__asm volatile ("mfsdr1 %0" : "=r"(sdr1));
#ifndef PMAP_OEA64
	addr = 0;
	for (i = 0; i < 16; i++) {
		soft_sr[i] = MFSRIN(addr);
		addr += (1 << ADDR_SR_SHFT);
	}
#endif

#if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
	/* read iBAT (601: uBAT) registers */
	__asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
	__asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
	__asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
	__asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
	__asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
	__asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
	__asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
	__asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));

	if (cpuvers != MPC601) {
		/* read dBAT registers */
		__asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
		__asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
		__asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
		__asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
		__asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
		__asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
		__asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
		__asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
	}
#endif

	printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
#ifndef PMAP_OEA64
	printf("SR[]:\t");
	for (i = 0; i < 4; i++)
		printf("0x%08lx,   ", soft_sr[i]);
	printf("\n\t");
	for ( ; i < 8; i++)
		printf("0x%08lx,   ", soft_sr[i]);
	printf("\n\t");
	for ( ; i < 12; i++)
		printf("0x%08lx,   ", soft_sr[i]);
	printf("\n\t");
	for ( ; i < 16; i++)
		printf("0x%08lx,   ", soft_sr[i]);
	printf("\n");
#endif

#if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE)
	printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
	for (i = 0; i < 4; i++) {
		printf("0x%08lx 0x%08lx, ",
		    soft_ibat[i].batu, soft_ibat[i].batl);
		if (i == 1)
			printf("\n\t");
	}
	if (cpuvers != MPC601) {
		printf("\ndBAT[]:\t");
		for (i = 0; i < 4; i++) {
			printf("0x%08lx 0x%08lx, ",
			    soft_dbat[i].batu, soft_dbat[i].batl);
			if (i == 1)
				printf("\n\t");
		}
	}
	printf("\n");
#endif /* PMAP_OEA... */
}

void
pmap_print_pte(pmap_t pm, vaddr_t va)
{
	struct pvo_entry *pvo;
	volatile struct pte *pt;
	int pteidx;

	pvo = pmap_pvo_find_va(pm, va, &pteidx);
	if (pvo != NULL) {
		pt = pmap_pvo_to_pte(pvo, pteidx);
		if (pt != NULL) {
			printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
			    va, pt,
			    pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
			    pt->pte_hi, pt->pte_lo);
		} else {
			printf("No valid PTE found\n");
		}
	} else {
		printf("Address not in pmap\n");
	}
}

void
pmap_pteg_dist(void)
{
	struct pvo_entry *pvo;
	int ptegidx;
	int depth;
	int max_depth = 0;
	unsigned int depths[64];

	memset(depths, 0, sizeof(depths));
	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
		depth = 0;
		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
			depth++;
		}
		if (depth > max_depth)
			max_depth = depth;
		if (depth > 63)
			depth = 63;
		depths[depth]++;
	}

	for (depth = 0; depth < 64; depth++) {
		printf("  [%2d]: %8u", depth, depths[depth]);
		if ((depth & 3) == 3)
			printf("\n");
		if (depth == max_depth)
			break;
	}
	if ((depth & 3) != 3)
		printf("\n");
	printf("Max depth found was %d\n", max_depth);
}
#endif /* DEBUG */

#if defined(PMAPCHECK) || defined(DEBUG)
void
pmap_pvo_verify(void)
{
	int ptegidx;
	int s;

	s = splvm();
	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
		struct pvo_entry *pvo;
		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
			if ((uintptr_t) pvo >= SEGMENT_LENGTH)
				panic("pmap_pvo_verify: invalid pvo %p "
				    "on list %#x", pvo, ptegidx);
			pmap_pvo_check(pvo);
		}
	}
	splx(s);
}
#endif /* PMAPCHECK */

void *
pmap_pool_ualloc(struct pool *pp, int flags)
{
	struct pvo_page *pvop;

	if (uvm.page_init_done != true) {
		return (void *) uvm_pageboot_alloc(PAGE_SIZE);
	}

	PMAP_LOCK();
	pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
	if (pvop != NULL) {
		pmap_upvop_free--;
		SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
		PMAP_UNLOCK();
		return pvop;
	}
	PMAP_UNLOCK();
	return pmap_pool_malloc(pp, flags);
}

void *
pmap_pool_malloc(struct pool *pp, int flags)
{
	struct pvo_page *pvop;
	struct vm_page *pg;

	PMAP_LOCK();
	pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
	if (pvop != NULL) {
		pmap_mpvop_free--;
		SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
		PMAP_UNLOCK();
		return pvop;
	}
	PMAP_UNLOCK();
 again:
	pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
	    UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
	if (__predict_false(pg == NULL)) {
		if (flags & PR_WAITOK) {
			uvm_wait("plpg");
			goto again;
		} else {
			return (0);
		}
	}
	KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
	return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
}

void
pmap_pool_ufree(struct pool *pp, void *va)
{
	struct pvo_page *pvop;
#if 0
	if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
		pmap_pool_mfree(va, size, tag);
		return;
	}
#endif
	PMAP_LOCK();
	pvop = va;
	SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
	pmap_upvop_free++;
	if (pmap_upvop_free > pmap_upvop_maxfree)
		pmap_upvop_maxfree = pmap_upvop_free;
	PMAP_UNLOCK();
}

void
pmap_pool_mfree(struct pool *pp, void *va)
{
	struct pvo_page *pvop;

	PMAP_LOCK();
	pvop = va;
	SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
	pmap_mpvop_free++;
	if (pmap_mpvop_free > pmap_mpvop_maxfree)
		pmap_mpvop_maxfree = pmap_mpvop_free;
	PMAP_UNLOCK();
#if 0
	uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
#endif
}

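/*
 * Editor's sketch (not part of the original source): these four routines
 * back the pvo pools through struct pool_allocator hooks.  The actual
 * pmap_pool_uallocator/pmap_pool_mallocator objects are defined earlier
 * in this file; their assumed shape is roughly:
 */
#if 0
struct pool_allocator pmap_pool_uallocator = {
	.pa_alloc = pmap_pool_ualloc,
	.pa_free = pmap_pool_ufree,
	.pa_pagesz = 0,
};
#endif
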
/*
 * This routine is used during bootstrapping to steal to-be-managed memory
 * (which will then be unmanaged).  We use it to grab from the first 256MB
 * for our pmap needs and above 256MB for other stuff.
 */
vaddr_t
pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
{
	vsize_t size;
	vaddr_t va;
	paddr_t pa = 0;
	int npgs, bank;
	struct vm_physseg *ps;

	if (uvm.page_init_done == true)
		panic("pmap_steal_memory: called _after_ bootstrap");

	*vstartp = VM_MIN_KERNEL_ADDRESS;
	*vendp = VM_MAX_KERNEL_ADDRESS;

	size = round_page(vsize);
	npgs = atop(size);

	/*
	 * PA 0 will never be among those given to UVM so we can use it
	 * to indicate we couldn't steal any memory.
	 */
	for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
		if (ps->free_list == VM_FREELIST_FIRST256 &&
		    ps->avail_end - ps->avail_start >= npgs) {
			pa = ptoa(ps->avail_start);
			break;
		}
	}

	if (pa == 0)
		panic("pmap_steal_memory: no appropriate memory to steal!");

	ps->avail_start += npgs;
	ps->start += npgs;

	/*
	 * If we've used up all the pages in the segment, remove it and
	 * compact the list.
	 */
	if (ps->avail_start == ps->end) {
		/*
		 * If this was the last one, then a very bad thing has occurred.
		 */
		if (--vm_nphysseg == 0)
			panic("pmap_steal_memory: out of memory!");

		printf("pmap_steal_memory: consumed bank %d\n", bank);
		for (; bank < vm_nphysseg; bank++, ps++) {
			ps[0] = ps[1];
		}
	}

	va = (vaddr_t) pa;
	memset((void *) va, 0, size);
	pmap_pages_stolen += npgs;
#ifdef DEBUG
	if (pmapdebug && npgs > 1) {
		u_int cnt = 0;
		for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
			cnt += ps->avail_end - ps->avail_start;
		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
		    npgs, pmap_pages_stolen, cnt);
	}
#endif

	return va;
}

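/*
 * Editor's sketch (not part of the original source): pmap_steal_memory()
 * is normally reached via uvm_pageboot_alloc(); called directly during
 * bootstrap it would look like this.  The returned memory is zeroed and,
 * on this pmap, accessible at its physical address.
 */
#if 0
static void *
example_boot_alloc(vsize_t sz)
{
	vaddr_t vstart, vend;

	return (void *)pmap_steal_memory(sz, &vstart, &vend);
}
#endif
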
/*
 * Find a chunk of memory with right size and alignment.
 */
paddr_t
pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
{
	struct mem_region *mp;
	paddr_t s, e;
	int i, j;

	size = round_page(size);

	DPRINTFN(BOOT,
	    ("pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
	    size, alignment, at_end));

	if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
		panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
		    alignment);

	if (at_end) {
		if (alignment != PAGE_SIZE)
			panic("pmap_boot_find_memory: invalid ending "
			    "alignment %#" _PRIxpa, alignment);

		for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
			s = mp->start + mp->size - size;
			if (s >= mp->start && mp->size >= size) {
				DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s));
				DPRINTFN(BOOT,
				    ("pmap_boot_find_memory: b-avail[%d] start "
				     "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
				     mp->start, mp->size));
				mp->size -= size;
				DPRINTFN(BOOT,
				    ("pmap_boot_find_memory: a-avail[%d] start "
				     "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
				     mp->start, mp->size));
				return s;
			}
		}
		panic("pmap_boot_find_memory: no available memory");
	}

	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
		s = (mp->start + alignment - 1) & ~(alignment-1);
		e = s + size;

		/*
		 * Is the calculated region entirely within this region?
		 */
		if (s < mp->start || e > mp->start + mp->size)
			continue;

		DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s));
		if (s == mp->start) {
			/*
			 * If the block starts at the beginning of the region,
			 * adjust the size & start.  (the region may now be
			 * zero in length)
			 */
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: b-avail[%d] start "
			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
			mp->start += size;
			mp->size -= size;
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: a-avail[%d] start "
			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
		} else if (e == mp->start + mp->size) {
			/*
			 * If the block ends at the end of the region,
			 * adjust only the size.
			 */
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: b-avail[%d] start "
			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
			mp->size -= size;
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: a-avail[%d] start "
			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
		} else {
			/*
			 * Block is in the middle of the region, so we
			 * have to split it in two.
			 */
			for (j = avail_cnt; j > i + 1; j--) {
				avail[j] = avail[j-1];
			}
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: b-avail[%d] start "
			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
			mp[1].start = e;
			mp[1].size = mp[0].start + mp[0].size - e;
			mp[0].size = s - mp[0].start;
			avail_cnt++;
			for (; i < avail_cnt; i++) {
				DPRINTFN(BOOT,
				    ("pmap_boot_find_memory: a-avail[%d] "
				     "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
				     avail[i].start, avail[i].size));
			}
		}
		KASSERT(s == (uintptr_t) s);
		return s;
	}
	panic("pmap_boot_find_memory: not enough memory for "
	    "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
}

/* XXXSL: we don't have any BATs to do this, map in Segment 0 1:1 using page tables */
#if defined (PMAP_OEA64_BRIDGE)
static int
pmap_setup_segment0_map(int use_large_pages, ...)
{
	vaddr_t va;

	register_t pte_lo = 0x0;
	int ptegidx = 0, i = 0;
	struct pte pte;
	va_list ap;

	/* Coherent + Supervisor RW, no user access */
	pte_lo = PTE_M;

	/* XXXSL
	 * Map in 1st segment 1:1, we'll be careful not to spill kernel entries later,
	 * these have to take priority.
	 */
	for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
		ptegidx = va_to_pteg(pmap_kernel(), va);
		pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
		i = pmap_pte_insert(ptegidx, &pte);
	}

	va_start(ap, use_large_pages);
	while (1) {
		paddr_t pa;
		size_t size;
		vaddr_t eva;

		va = va_arg(ap, vaddr_t);

		if (va == 0)
			break;

		pa = va_arg(ap, paddr_t);
		size = va_arg(ap, size_t);

		/* Compute the end first: "va < va + size" would never be false. */
		for (eva = va + size; va < eva; va += 0x1000, pa += 0x1000) {
#if 0
			printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__,  va, pa);
#endif
			ptegidx = va_to_pteg(pmap_kernel(), va);
			pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
			i = pmap_pte_insert(ptegidx, &pte);
		}
	}
	va_end(ap);

	TLBSYNC();
	SYNC();
	return (0);
}
#endif /* PMAP_OEA64_BRIDGE */

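/*
 * Editor's sketch (not part of the original source): the variadic tail
 * of pmap_setup_segment0_map() is a zero-terminated list of
 * (va, pa, size) triples mapped in addition to the 1:1 segment-0
 * window; a hypothetical call from early MMU setup, with made-up
 * addresses:
 */
#if 0
	pmap_setup_segment0_map(0,
	    (vaddr_t)0x91000000, (paddr_t)0x91000000, (size_t)0x00100000,
	    (vaddr_t)0 /* terminator */);
#endif
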
/*
 * This is not part of the defined PMAP interface and is specific to the
 * PowerPC architecture.  This is called during initppc, before the system
 * is really initialized.
 */
void
pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
{
	struct mem_region *mp, tmp;
	paddr_t s, e;
	psize_t size;
	int i, j;

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
#if defined(DEBUG)
	if (pmapdebug & PMAPDEBUG_BOOT) {
		printf("pmap_bootstrap: memory configuration:\n");
		for (mp = mem; mp->size; mp++) {
			printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
			    mp->start, mp->size);
		}
		for (mp = avail; mp->size; mp++) {
			printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
			    mp->start, mp->size);
		}
	}
#endif

	/*
	 * Find out how much physical memory we have and in how many chunks.
	 */
	for (mem_cnt = 0, mp = mem; mp->size; mp++) {
		if (mp->start >= pmap_memlimit)
			continue;
		if (mp->start + mp->size > pmap_memlimit) {
			size = pmap_memlimit - mp->start;
			physmem += btoc(size);
		} else {
			physmem += btoc(mp->size);
		}
		mem_cnt++;
	}

	/*
	 * Count the number of available entries.
	 */
	for (avail_cnt = 0, mp = avail; mp->size; mp++)
		avail_cnt++;

	/*
	 * Page align all regions.
	 */
	kernelstart = trunc_page(kernelstart);
	kernelend = round_page(kernelend);
	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
		s = round_page(mp->start);
		mp->size -= (s - mp->start);
		mp->size = trunc_page(mp->size);
		mp->start = s;
		e = mp->start + mp->size;

		DPRINTFN(BOOT,
		    ("pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
		    i, mp->start, mp->size));

		/*
		 * Don't allow the end to run beyond our artificial limit.
		 */
		if (e > pmap_memlimit)
			e = pmap_memlimit;

		/*
		 * Is this region empty or strange?  skip it.
		 */
		if (e <= s) {
			mp->start = 0;
			mp->size = 0;
			continue;
		}
		/*
		 * Does this overlap the beginning of the kernel?
		 * Does it extend past the end of the kernel?
		 */
		else if (s < kernelstart && e > kernelstart) {
			if (e > kernelend) {
				avail[avail_cnt].start = kernelend;
				avail[avail_cnt].size = e - kernelend;
				avail_cnt++;
			}
			mp->size = kernelstart - s;
		}
		/*
		 * Check whether this region overlaps the end of the kernel.
		 */
		else if (s < kernelend && e > kernelend) {
			mp->start = kernelend;
			mp->size = e - kernelend;
		}
		/*
		 * Check whether this region is completely inside the kernel.
		 * Nuke it if so.
		 */
		else if (s >= kernelstart && e <= kernelend) {
			mp->start = 0;
			mp->size = 0;
		}
		/*
		 * If the user imposed a memory limit, enforce it.
		 */
		else if (s >= pmap_memlimit) {
			mp->start = -PAGE_SIZE;	/* let's know why */
			mp->size = 0;
		}
		else {
			mp->start = s;
			mp->size = e - s;
		}
		DPRINTFN(BOOT,
		    ("pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
		    i, mp->start, mp->size));
	}

	/*
	 * Move (and uncount) all the null regions to the end.
	 */
	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
		if (mp->size == 0) {
			tmp = avail[i];
			avail[i] = avail[--avail_cnt];
			avail[avail_cnt] = tmp;
		}
	}

	/*
	 * (Bubble)sort them into ascending order.
	 */
	for (i = 0; i < avail_cnt; i++) {
		for (j = i + 1; j < avail_cnt; j++) {
			if (avail[i].start > avail[j].start) {
				tmp = avail[i];
				avail[i] = avail[j];
				avail[j] = tmp;
			}
		}
	}

	/*
	 * Make sure they don't overlap.
	 */
	for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
		if (mp[0].start + mp[0].size > mp[1].start) {
			mp[0].size = mp[1].start - mp[0].start;
		}
		DPRINTFN(BOOT,
		    ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
		    i, mp->start, mp->size));
	}
	DPRINTFN(BOOT,
	    ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
	    i, mp->start, mp->size));

#ifdef PTEGCOUNT
	pmap_pteg_cnt = PTEGCOUNT;
#else /* PTEGCOUNT */

	pmap_pteg_cnt = 0x1000;

	while (pmap_pteg_cnt < physmem)
		pmap_pteg_cnt <<= 1;

	pmap_pteg_cnt >>= 1;
#endif /* PTEGCOUNT */

#ifdef DEBUG
	DPRINTFN(BOOT,
	    ("pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt));
#endif

	/*
	 * Find suitably aligned memory for PTEG hash table.
	 */
	size = pmap_pteg_cnt * sizeof(struct pteg);
	pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);

#ifdef DEBUG
	DPRINTFN(BOOT,
	    ("PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table));
#endif

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
		panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB",
		    pmap_pteg_table, size);
#endif

	memset(__UNVOLATILE(pmap_pteg_table), 0,
	    pmap_pteg_cnt * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_cnt - 1;

	/*
	 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
	 * with pages.  So we just steal them before giving them to UVM.
	 */
	size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
	pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
		panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB",
		    pmap_pvo_table, size);
#endif

	for (i = 0; i < pmap_pteg_cnt; i++)
		TAILQ_INIT(&pmap_pvo_table[i]);

#ifndef MSGBUFADDR
	/*
	 * Allocate msgbuf in high memory.
	 */
	msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
#endif

	for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
		paddr_t pfstart = atop(mp->start);
		paddr_t pfend = atop(mp->start + mp->size);
		if (mp->size == 0)
			continue;
		if (mp->start + mp->size <= SEGMENT_LENGTH) {
			uvm_page_physload(pfstart, pfend, pfstart, pfend,
			    VM_FREELIST_FIRST256);
		} else if (mp->start >= SEGMENT_LENGTH) {
			uvm_page_physload(pfstart, pfend, pfstart, pfend,
			    VM_FREELIST_DEFAULT);
		} else {
			pfend = atop(SEGMENT_LENGTH);
			uvm_page_physload(pfstart, pfend, pfstart, pfend,
			    VM_FREELIST_FIRST256);
			pfstart = atop(SEGMENT_LENGTH);
			pfend = atop(mp->start + mp->size);
			uvm_page_physload(pfstart, pfend, pfstart, pfend,
			    VM_FREELIST_DEFAULT);
		}
	}

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
	    |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
	    |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;

	/*
	 * Initialize kernel pmap and hardware.
	 */

/* PMAP_OEA64_BRIDGE does support these instructions */
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	for (i = 0; i < 16; i++) {
		pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
		__asm volatile ("mtsrin %0,%1"
		    :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
	}

	pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
	__asm volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
#ifdef KERNEL2_SR
	pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
	__asm volatile ("mtsr %0,%1"
	    :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
#endif
#endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
#if defined (PMAP_OEA)
	for (i = 0; i < 16; i++) {
		if (iosrtable[i] & SR601_T) {
			pmap_kernel()->pm_sr[i] = iosrtable[i];
			__asm volatile ("mtsrin %0,%1"
			    :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
		}
	}
	__asm volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)pmap_pteg_table | (32 - cntlzw(pmap_pteg_mask >> 11))));
#endif
	tlbia();

#ifdef ALTIVEC
	pmap_use_altivec = cpu_altivec;
#endif

#ifdef DEBUG
	if (pmapdebug & PMAPDEBUG_BOOT) {
		u_int cnt;
		int bank;
		char pbuf[9];
		for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
			cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
			printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
			    bank,
			    ptoa(vm_physmem[bank].avail_start),
			    ptoa(vm_physmem[bank].avail_end),
			    ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
		}
		format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
		printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
		    pbuf, cnt);
	}
#endif

	pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
	    sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
	    &pmap_pool_uallocator, IPL_VM);

	pool_setlowat(&pmap_upvo_pool, 252);

	pool_init(&pmap_pool, sizeof(struct pmap),
	    sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
	    IPL_NONE);

#if defined(PMAP_NEED_MAPKERNEL) || 1
	{
		struct pmap *pm = pmap_kernel();
#if defined(PMAP_NEED_FULL_MAPKERNEL)
		extern int etext[], kernel_text[];
		vaddr_t va, va_etext = (paddr_t) etext;
#endif
		paddr_t pa, pa_end;
		register_t sr;
		struct pte pt;
		unsigned int ptegidx;
		int bank;

		sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;
		pm->pm_sr[0] = sr;

		for (bank = 0; bank < vm_nphysseg; bank++) {
			pa_end = ptoa(vm_physmem[bank].avail_end);
			pa = ptoa(vm_physmem[bank].avail_start);
			for (; pa < pa_end; pa += PAGE_SIZE) {
				ptegidx = va_to_pteg(pm, pa);
				pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
				pmap_pte_insert(ptegidx, &pt);
			}
		}

#if defined(PMAP_NEED_FULL_MAPKERNEL)
		va = (vaddr_t) kernel_text;

		for (pa = kernelstart; va < va_etext;
		     pa += PAGE_SIZE, va += PAGE_SIZE) {
			ptegidx = va_to_pteg(pm, va);
			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
			pmap_pte_insert(ptegidx, &pt);
		}

		for (; pa < kernelend;
		     pa += PAGE_SIZE, va += PAGE_SIZE) {
			ptegidx = va_to_pteg(pm, va);
			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
			pmap_pte_insert(ptegidx, &pt);
		}

		for (va = 0, pa = 0; va < kernelstart;
		     pa += PAGE_SIZE, va += PAGE_SIZE) {
			ptegidx = va_to_pteg(pm, va);
			if (va < 0x3000)
				pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
			else
				pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
			pmap_pte_insert(ptegidx, &pt);
		}
		for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH;
		     pa += PAGE_SIZE, va += PAGE_SIZE) {
			ptegidx = va_to_pteg(pm, va);
			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
			pmap_pte_insert(ptegidx, &pt);
		}
#endif

		__asm volatile ("mtsrin %0,%1"
		    :: "r"(sr), "r"(kernelstart));
	}
#endif
}