/*	$NetBSD: pmap.c,v 1.210 2010/01/01 02:32:28 uebayasi Exp $	*/

/*-
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001-2002 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * RiscBSD kernel project
 *
 * Machine dependent vm stuff
 */
/*
 * armv6 and VIPT cache support by 3am Software Foundry,
 * Copyright (c) 2007 Microsoft
 *
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 *
 * Overhauled again to speed up the pmap, use MMU domains so that L1 tables
 * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
 * Systems, Inc.
 */
/*
 * There are still a few things outstanding at this time:
 *
 *   - There are some unresolved issues for MP systems:
 *
 *     o The L1 metadata needs a lock, or more specifically, some places
 *       need to acquire an exclusive lock when modifying L1 translation
 *       table entries.
 *
 *     o When one cpu modifies an L1 entry, and that L1 table is also
 *       being used by another cpu, then the latter will need to be told
 *       that a tlb invalidation may be necessary. (But only if the old
 *       domain number in the L1 entry being over-written is currently
 *       the active domain on that cpu). I guess there are lots more tlb
 *       shootdown issues too...
 *
 *     o If the vector_page is at 0x00000000 instead of 0xffff0000, then
 *       MP systems will lose big-time because of the MMU domain hack.
 *       The only way this can be solved (apart from moving the vector
 *       page to 0xffff0000) is to reserve the first 1MB of user address
 *       space for kernel use only. This would require re-linking all
 *       applications so that the text section starts above this 1MB
 *       boundary.
 *
 *     o Tracking which VM space is resident in the cache/tlb has not yet
 *       been implemented for MP systems.
 *
 *     o Finally, there is a pathological condition where two cpus running
 *       two separate processes (not lwps) which happen to share an L1
 *       can get into a fight over one or more L1 entries. This will result
 *       in a significant slow-down if both processes are in tight loops.
 */
/*
 * Special compilation symbols
 * PMAP_DEBUG		- Build in pmap_debug_level code
 */
/* Include header files */

#include "opt_cpuoptions.h"
#include "opt_pmap_debug.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/cdefs.h>
#include <sys/cpu.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>

#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <arm/arm32/katelib.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.210 2010/01/01 02:32:28 uebayasi Exp $");
#ifdef PMAP_DEBUG

/* XXX need to get rid of all refs to this */
int pmap_debug_level = 0;

/*
 * for switching to potentially finer grained debugging
 */
#define	PDB_FOLLOW	0x0001
#define	PDB_INIT	0x0002
#define	PDB_ENTER	0x0004
#define	PDB_REMOVE	0x0008
#define	PDB_CREATE	0x0010
#define	PDB_PTPAGE	0x0020
#define	PDB_GROWKERN	0x0040
#define	PDB_BITS	0x0080
#define	PDB_COLLECT	0x0100
#define	PDB_PROTECT	0x0200
#define	PDB_MAP_L1	0x0400
#define	PDB_BOOTSTRAP	0x1000
#define	PDB_PARANOIA	0x2000
#define	PDB_WIRING	0x4000
#define	PDB_PVDUMP	0x8000
#define	PDB_VAC		0x10000
#define	PDB_KENTER	0x20000
#define	PDB_KREMOVE	0x40000
#define	PDB_EXEC	0x80000

int pmapdebug = 0;

#define	NPDEBUG(_lev_,_stat_) \
	if (pmapdebug & (_lev_)) \
		((_stat_))

#else	/* PMAP_DEBUG */
#define NPDEBUG(_lev_,_stat_) /* Nothing */
#endif	/* PMAP_DEBUG */
/*
 * pmap_kernel() points here
 */
static struct pmap	kernel_pmap_store;
struct pmap		*const kernel_pmap_ptr = &kernel_pmap_store;

/*
 * Which pmap is currently 'live' in the cache
 *
 * XXXSCW: Fix for SMP ...
 */
static pmap_t pmap_recent_user;

/*
 * Pointer to last active lwp, or NULL if it exited.
 */
struct lwp *pmap_previous_active_lwp;

/*
 * Pool and cache that pmap structures are allocated from.
 * We use a cache to avoid clearing the pm_l2[] array (1KB)
 * in pmap_create().
 */
static struct pool_cache pmap_cache;
static LIST_HEAD(, pmap) pmap_pmaps;

/*
 * Pool of PV structures
 */
static struct pool pmap_pv_pool;
static void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
static void pmap_bootstrap_pv_page_free(struct pool *, void *);
static struct pool_allocator pmap_bootstrap_pv_allocator = {
	pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
};

/*
 * Pool and cache of l2_dtable structures.
 * We use a cache to avoid clearing the structures when they're
 * allocated. (196 bytes)
 */
static struct pool_cache pmap_l2dtable_cache;
static vaddr_t pmap_kernel_l2dtable_kva;

/*
 * Pool and cache of L2 page descriptors.
 * We use a cache to avoid clearing the descriptor table
 * when they're allocated. (1KB)
 */
static struct pool_cache pmap_l2ptp_cache;
static vaddr_t pmap_kernel_l2ptp_kva;
static paddr_t pmap_kernel_l2ptp_phys;
#ifdef PMAP_EVENT_COUNTERS
#define	PMAP_EVCNT_INITIALIZER(name) \
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
#ifdef PMAP_CACHE_VIPT
static struct evcnt pmap_ev_vac_clean_one =
   PMAP_EVCNT_INITIALIZER("clean page (1 color)");
static struct evcnt pmap_ev_vac_flush_one =
   PMAP_EVCNT_INITIALIZER("flush page (1 color)");
static struct evcnt pmap_ev_vac_flush_lots =
   PMAP_EVCNT_INITIALIZER("flush page (2+ colors)");
static struct evcnt pmap_ev_vac_flush_lots2 =
   PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)");
EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one);
EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one);
EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots);
EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2);

static struct evcnt pmap_ev_vac_color_new =
   PMAP_EVCNT_INITIALIZER("new page color");
static struct evcnt pmap_ev_vac_color_reuse =
   PMAP_EVCNT_INITIALIZER("ok first page color");
static struct evcnt pmap_ev_vac_color_ok =
   PMAP_EVCNT_INITIALIZER("ok page color");
static struct evcnt pmap_ev_vac_color_blind =
   PMAP_EVCNT_INITIALIZER("blind page color");
static struct evcnt pmap_ev_vac_color_change =
   PMAP_EVCNT_INITIALIZER("change page color");
static struct evcnt pmap_ev_vac_color_erase =
   PMAP_EVCNT_INITIALIZER("erase page color");
static struct evcnt pmap_ev_vac_color_none =
   PMAP_EVCNT_INITIALIZER("no page color");
static struct evcnt pmap_ev_vac_color_restore =
   PMAP_EVCNT_INITIALIZER("restore page color");

EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none);
EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore);
#endif

static struct evcnt pmap_ev_mappings =
   PMAP_EVCNT_INITIALIZER("pages mapped");
static struct evcnt pmap_ev_unmappings =
   PMAP_EVCNT_INITIALIZER("pages unmapped");
static struct evcnt pmap_ev_remappings =
   PMAP_EVCNT_INITIALIZER("pages remapped");

EVCNT_ATTACH_STATIC(pmap_ev_mappings);
EVCNT_ATTACH_STATIC(pmap_ev_unmappings);
EVCNT_ATTACH_STATIC(pmap_ev_remappings);

static struct evcnt pmap_ev_kernel_mappings =
   PMAP_EVCNT_INITIALIZER("kernel pages mapped");
static struct evcnt pmap_ev_kernel_unmappings =
   PMAP_EVCNT_INITIALIZER("kernel pages unmapped");
static struct evcnt pmap_ev_kernel_remappings =
   PMAP_EVCNT_INITIALIZER("kernel pages remapped");

EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings);
EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings);
EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings);

static struct evcnt pmap_ev_kenter_mappings =
   PMAP_EVCNT_INITIALIZER("kenter pages mapped");
static struct evcnt pmap_ev_kenter_unmappings =
   PMAP_EVCNT_INITIALIZER("kenter pages unmapped");
static struct evcnt pmap_ev_kenter_remappings =
   PMAP_EVCNT_INITIALIZER("kenter pages remapped");
static struct evcnt pmap_ev_pt_mappings =
   PMAP_EVCNT_INITIALIZER("page table pages mapped");

EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings);
EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings);
EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings);
EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings);

#ifdef PMAP_CACHE_VIPT
static struct evcnt pmap_ev_exec_mappings =
   PMAP_EVCNT_INITIALIZER("exec pages mapped");
static struct evcnt pmap_ev_exec_cached =
   PMAP_EVCNT_INITIALIZER("exec pages cached");

EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings);
EVCNT_ATTACH_STATIC(pmap_ev_exec_cached);

static struct evcnt pmap_ev_exec_synced =
   PMAP_EVCNT_INITIALIZER("exec pages synced");
static struct evcnt pmap_ev_exec_synced_map =
   PMAP_EVCNT_INITIALIZER("exec pages synced (MP)");
static struct evcnt pmap_ev_exec_synced_unmap =
   PMAP_EVCNT_INITIALIZER("exec pages synced (UM)");
static struct evcnt pmap_ev_exec_synced_remap =
   PMAP_EVCNT_INITIALIZER("exec pages synced (RM)");
static struct evcnt pmap_ev_exec_synced_clearbit =
   PMAP_EVCNT_INITIALIZER("exec pages synced (DG)");
static struct evcnt pmap_ev_exec_synced_kremove =
   PMAP_EVCNT_INITIALIZER("exec pages synced (KU)");

EVCNT_ATTACH_STATIC(pmap_ev_exec_synced);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit);
EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove);

static struct evcnt pmap_ev_exec_discarded_unmap =
   PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)");
static struct evcnt pmap_ev_exec_discarded_zero =
   PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)");
static struct evcnt pmap_ev_exec_discarded_copy =
   PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)");
static struct evcnt pmap_ev_exec_discarded_page_protect =
   PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)");
static struct evcnt pmap_ev_exec_discarded_clearbit =
   PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)");
static struct evcnt pmap_ev_exec_discarded_kremove =
   PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)");

EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit);
EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove);
#endif /* PMAP_CACHE_VIPT */

static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates");
static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects");
static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations");

EVCNT_ATTACH_STATIC(pmap_ev_updates);
EVCNT_ATTACH_STATIC(pmap_ev_collects);
EVCNT_ATTACH_STATIC(pmap_ev_activations);

#define	PMAPCOUNT(x)	((void)(pmap_ev_##x.ev_count++))
#else
#define	PMAPCOUNT(x)	((void)0)
#endif
/*
 * pmap copy/zero page, and mem(5) hook point
 */
static pt_entry_t *csrc_pte, *cdst_pte;
static vaddr_t csrcp, cdstp;
vaddr_t memhook;			/* used by mem.c */
kmutex_t memlock;			/* used by mem.c */
void *zeropage;				/* used by mem.c */
extern void *msgbufaddr;

/*
 * Flag to indicate if pmap_init() has done its thing
 */
bool pmap_initialized;
/*
 * Misc. locking data structures
 */

#if 0 /* defined(MULTIPROCESSOR) || defined(LOCKDEBUG) */
static struct lock pmap_main_lock;

#define	PMAP_MAP_TO_HEAD_LOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define	PMAP_MAP_TO_HEAD_UNLOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#define	PMAP_HEAD_TO_MAP_LOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define	PMAP_HEAD_TO_MAP_UNLOCK() \
	spinlockmgr(&pmap_main_lock, LK_RELEASE, (void *) 0)
#else
#define	PMAP_MAP_TO_HEAD_LOCK()		/* null */
#define	PMAP_MAP_TO_HEAD_UNLOCK()	/* null */
#define	PMAP_HEAD_TO_MAP_LOCK()		/* null */
#define	PMAP_HEAD_TO_MAP_UNLOCK()	/* null */
#endif

#define	pmap_acquire_pmap_lock(pm)			\
	do {						\
		if ((pm) != pmap_kernel())		\
			mutex_enter(&(pm)->pm_lock);	\
	} while (/*CONSTCOND*/0)

#define	pmap_release_pmap_lock(pm)			\
	do {						\
		if ((pm) != pmap_kernel())		\
			mutex_exit(&(pm)->pm_lock);	\
	} while (/*CONSTCOND*/0)
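
/*
 * Illustrative usage of the two macros above (a sketch, not additional
 * API surface): callers bracket page-table modifications with
 *
 *	pmap_acquire_pmap_lock(pm);
 *	... modify the pmap's L1/L2 entries ...
 *	pmap_release_pmap_lock(pm);
 *
 * Note that both macros deliberately skip locking for the kernel pmap.
 */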
/*
 * Metadata for L1 translation tables.
 */
struct l1_ttable {
	/* Entry on the L1 Table list */
	SLIST_ENTRY(l1_ttable) l1_link;

	/* Entry on the L1 Least Recently Used list */
	TAILQ_ENTRY(l1_ttable) l1_lru;

	/* Track how many domains are allocated from this L1 */
	volatile u_int l1_domain_use_count;

	/*
	 * A free-list of domain numbers for this L1.
	 * We avoid using ffs() and a bitmap to track domains since ffs()
	 * is slow on ARM.
	 */
	u_int8_t l1_domain_first;
	u_int8_t l1_domain_free[PMAP_DOMAINS];

	/* Physical address of this L1 page table */
	paddr_t l1_physaddr;

	/* KVA of this L1 page table */
	pd_entry_t *l1_kva;
};
/*
 * Convert a virtual address into its L1 table index. That is, the
 * index used to locate the L2 descriptor table pointer in an L1 table.
 * This is basically used to index l1->l1_kva[].
 *
 * Each L2 descriptor table represents 1MB of VA space.
 */
#define	L1_IDX(va)		(((vaddr_t)(va)) >> L1_S_SHIFT)
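
/*
 * Worked example (illustrative only; L1_S_SHIFT is 20 on ARM, i.e.
 * 1MB sections): L1_IDX(0xc0123456) == 0xc01, so the L2 descriptor
 * table pointer for that address lives in l1->l1_kva[0xc01].
 */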
/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
 *  - Recently accessed L1s (where an 'access' is some change to one of
 *    the userland pmaps which owns this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
static struct simplelock l1_lru_lock;

/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;
/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * garbage collected.
 */
struct l2_dtable {
	/* The number of L2 page descriptors allocated to this l2_dtable */
	u_int l2_occupancy;

	/* List of L2 page descriptors */
	struct l2_bucket {
		pt_entry_t *l2b_kva;	/* KVA of L2 Descriptor Table */
		paddr_t l2b_phys;	/* Physical address of same */
		u_short l2b_l1idx;	/* This L2 table's L1 index */
		u_short l2b_occupancy;	/* How many active descriptors */
	} l2_bucket[L2_BUCKET_SIZE];
};

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define	L2_IDX(l1idx)		(((l1idx) >> L2_BUCKET_LOG2) & \
				 (L2_SIZE - 1))
#define	L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)
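
/*
 * Worked example for the three macros above (illustrative, assuming
 * L2_BUCKET_LOG2 == 4 so each l2_dtable spans 16 x 1MB == 16MB): for
 * va == 0xc0123456, l1idx == 0xc01, so L2_IDX(l1idx) == 0xc0 and
 * L2_BUCKET(l1idx) == 0x1; L2_NEXT_BUCKET(va) == 0xc0200000, i.e. the
 * first address of the following 1MB section.
 */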
/*
 * L2 allocation.
 */
#define	pmap_alloc_l2_dtable()		\
	    pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
#define	pmap_free_l2_dtable(l2)		\
	    pool_cache_put(&pmap_l2dtable_cache, (l2))
#define pmap_alloc_l2_ptp(pap)		\
	    ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
	    PR_NOWAIT, (pap)))

/*
 * We try to map the page tables write-through, if possible. However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible. However, it's
 * not always possible to do that, hence this run-time var.
 */
int	pmap_needs_pte_sync;
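
/*
 * The usual idiom that follows from the above (a sketch; PTE_SYNC and
 * PMAP_NEEDS_PTE_SYNC come from <arm/arm32/pmap.h>):
 *
 *	*ptep = npte;
 *	PTE_SYNC(ptep);
 *
 * where PTE_SYNC degenerates to a no-op when the page tables are
 * mapped write-through, and otherwise cleans the PTE's D-cache line
 * so the MMU's table walker sees the update.
 */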
/*
 * Real definition of pv_entry.
 */
struct pv_entry {
	SLIST_ENTRY(pv_entry) pv_link;	/* next pv_entry */
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	u_int		pv_flags;	/* flags */
};

/*
 * Macro to determine if a mapping might be resident in the
 * instruction cache and/or TLB
 */
#define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
#define	PV_IS_EXEC_P(f)   (((f) & PVF_EXEC) != 0)

/*
 * Macro to determine if a mapping might be resident in the
 * data cache and/or TLB
 */
#define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)
/*
 * Local prototypes
 */
static int		pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t);
static void		pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
			    pt_entry_t **);
static bool		pmap_is_current(pmap_t);
static bool		pmap_is_cached(pmap_t);
static void		pmap_enter_pv(struct vm_page *, struct pv_entry *,
			    pmap_t, vaddr_t, u_int);
static struct pv_entry *pmap_find_pv(struct vm_page *, pmap_t, vaddr_t);
static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vaddr_t);
static u_int		pmap_modify_pv(struct vm_page *, pmap_t, vaddr_t,
			    u_int, u_int);

static void		pmap_pinit(pmap_t);
static int		pmap_pmap_ctor(void *, void *, int);

static void		pmap_alloc_l1(pmap_t);
static void		pmap_free_l1(pmap_t);
static void		pmap_use_l1(pmap_t);

static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
static int		pmap_l2ptp_ctor(void *, void *, int);
static int		pmap_l2dtable_ctor(void *, void *, int);

static void		pmap_vac_me_harder(struct vm_page *, pmap_t, vaddr_t);
#ifdef PMAP_CACHE_VIVT
static void		pmap_vac_me_kpmap(struct vm_page *, pmap_t, vaddr_t);
static void		pmap_vac_me_user(struct vm_page *, pmap_t, vaddr_t);
#endif

static void		pmap_clearbit(struct vm_page *, u_int);
#ifdef PMAP_CACHE_VIVT
static int		pmap_clean_page(struct pv_entry *, bool);
#endif
#ifdef PMAP_CACHE_VIPT
static void		pmap_syncicache_page(struct vm_page *);
enum pmap_flush_op {
	PMAP_FLUSH_PRIMARY,
	PMAP_FLUSH_SECONDARY,
	PMAP_CLEAN_PRIMARY
};
static void		pmap_flush_page(struct vm_page *, enum pmap_flush_op);
#endif
static void		pmap_page_remove(struct vm_page *);

static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
static vaddr_t		kernel_pt_lookup(paddr_t);

/*
 * External function prototypes
 */
extern void bzero_page(vaddr_t);
extern void bcopy_page(vaddr_t, vaddr_t);
/*
 * Misc variables
 */
vaddr_t virtual_avail;
vaddr_t virtual_end;
vaddr_t pmap_curmaxkvaddr;

pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq);
pv_addr_t kernelpages;
pv_addr_t kernel_l1pt;
pv_addr_t systempage;
/* Function to set the debug level of the pmap code */

#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
	pmap_debug_level = level;
	printf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */
/*
 * A bunch of routines to conditionally flush the caches/TLB depending
 * on whether the specified pmap actually needs to be flushed at any
 * given time.
 */
static inline void
pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va)
{

	if (pm->pm_cstate.cs_tlb_id)
		cpu_tlb_flushID_SE(va);
}

static inline void
pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va)
{

	if (pm->pm_cstate.cs_tlb_d)
		cpu_tlb_flushD_SE(va);
}

static inline void
pmap_tlb_flushID(pmap_t pm)
{

	if (pm->pm_cstate.cs_tlb_id) {
		cpu_tlb_flushID();
		pm->pm_cstate.cs_tlb = 0;
	}
}

static inline void
pmap_tlb_flushD(pmap_t pm)
{

	if (pm->pm_cstate.cs_tlb_d) {
		cpu_tlb_flushD();
		pm->pm_cstate.cs_tlb_d = 0;
	}
}

#ifdef PMAP_CACHE_VIVT
static inline void
pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len)
{
	if (pm->pm_cstate.cs_cache_id) {
		cpu_idcache_wbinv_range(va, len);
	}
}

static inline void
pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len,
    bool do_inv, bool rd_only)
{

	if (pm->pm_cstate.cs_cache_d) {
		if (do_inv) {
			if (rd_only)
				cpu_dcache_inv_range(va, len);
			else
				cpu_dcache_wbinv_range(va, len);
		} else
		if (!rd_only)
			cpu_dcache_wb_range(va, len);
	}
}

static inline void
pmap_idcache_wbinv_all(pmap_t pm)
{
	if (pm->pm_cstate.cs_cache_id) {
		cpu_idcache_wbinv_all();
		pm->pm_cstate.cs_cache = 0;
	}
}

static inline void
pmap_dcache_wbinv_all(pmap_t pm)
{
	if (pm->pm_cstate.cs_cache_d) {
		cpu_dcache_wbinv_all();
		pm->pm_cstate.cs_cache_d = 0;
	}
}
#endif /* PMAP_CACHE_VIVT */
static inline bool
pmap_is_current(pmap_t pm)
{

	if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm)
		return true;

	return false;
}

static inline bool
pmap_is_cached(pmap_t pm)
{

	if (pm == pmap_kernel() || pmap_recent_user == NULL ||
	    pmap_recent_user == pm)
		return true;

	return false;
}
/*
 * PTE_SYNC_CURRENT:
 *
 * Make sure the pte is written out to RAM.
 * We need to do this in any of the following cases:
 *  - We're dealing with the kernel pmap
 *  - There is no pmap active in the cache/tlb.
 *  - The specified pmap is 'active' in the cache/tlb.
 */
#ifdef PMAP_INCLUDE_PTE_SYNC
#define	PTE_SYNC_CURRENT(pm, ptep)	\
do {					\
	if (PMAP_NEEDS_PTE_SYNC && 	\
	    pmap_is_cached(pm))		\
		PTE_SYNC(ptep);		\
} while (/*CONSTCOND*/0)
#else
#define	PTE_SYNC_CURRENT(pm, ptep)	/* nothing */
#endif
/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pv, pmap_t pm,
    vaddr_t va, u_int flags)
{
	struct pv_entry **pvp;

	NPDEBUG(PDB_PVDUMP,
	    printf("pmap_enter_pv: pm %p, pg %p, flags 0x%x\n", pm, pg, flags));

	pv->pv_pmap = pm;
	pv->pv_va = va;
	pv->pv_flags = flags;

	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
	pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
#ifdef PMAP_CACHE_VIPT
	/*
	 * Insert unmanaged entries, writeable first, at the head of
	 * the pv list.
	 */
	if (__predict_true((flags & PVF_KENTRY) == 0)) {
		while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
			pvp = &SLIST_NEXT(*pvp, pv_link);
	} else if ((flags & PVF_WRITE) == 0) {
		while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE)
			pvp = &SLIST_NEXT(*pvp, pv_link);
	}
#endif
	SLIST_NEXT(pv, pv_link) = *pvp;		/* add to ... */
	*pvp = pv;				/* ... locked list */
	pg->mdpage.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
#ifdef PMAP_CACHE_VIPT
	if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE)
		pg->mdpage.pvh_attrs |= PVF_KMOD;
	if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
		pg->mdpage.pvh_attrs |= PVF_DIRTY;
	KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif
	if (pm == pmap_kernel()) {
		PMAPCOUNT(kernel_mappings);
		if (flags & PVF_WRITE)
			pg->mdpage.krw_mappings++;
		else
			pg->mdpage.kro_mappings++;
	} else
	if (flags & PVF_WRITE)
		pg->mdpage.urw_mappings++;
	else
		pg->mdpage.uro_mappings++;

	PMAPCOUNT(mappings);

#ifdef PMAP_CACHE_VIPT
	/*
	 * If this is an exec mapping and it's the first exec mapping
	 * for this page, make sure to sync the I-cache.
	 */
	if (PV_IS_EXEC_P(flags)) {
		if (!PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) {
			pmap_syncicache_page(pg);
			PMAPCOUNT(exec_synced_map);
		}
		PMAPCOUNT(exec_mappings);
	}
#endif

	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */

	if (pv->pv_flags & PVF_WIRED)
		++pm->pm_stats.wired_count;
}
/*
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static inline struct pv_entry *
pmap_find_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
	struct pv_entry *pv;

	SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
		if (pm == pv->pv_pmap && va == pv->pv_va)
			break;
	}

	return (pv);
}
/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pv
 */
static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
	struct pv_entry *pv, **prevptr;

	NPDEBUG(PDB_PVDUMP,
	    printf("pmap_remove_pv: pm %p, pg %p, va 0x%08lx\n", pm, pg, va));

	prevptr = &SLIST_FIRST(&pg->mdpage.pvh_list); /* prev pv_entry ptr */
	pv = *prevptr;

	while (pv) {
		if (pv->pv_pmap == pm && pv->pv_va == va) {	/* match? */
			NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, pg "
			    "%p, flags 0x%x\n", pm, pg, pv->pv_flags));
			if (pv->pv_flags & PVF_WIRED) {
				--pm->pm_stats.wired_count;
			}
			*prevptr = SLIST_NEXT(pv, pv_link);	/* remove it! */
			if (pm == pmap_kernel()) {
				PMAPCOUNT(kernel_unmappings);
				if (pv->pv_flags & PVF_WRITE)
					pg->mdpage.krw_mappings--;
				else
					pg->mdpage.kro_mappings--;
			} else
			if (pv->pv_flags & PVF_WRITE)
				pg->mdpage.urw_mappings--;
			else
				pg->mdpage.uro_mappings--;

			PMAPCOUNT(unmappings);
#ifdef PMAP_CACHE_VIPT
			if (!(pv->pv_flags & PVF_WRITE))
				break;
			/*
			 * If this page has had an exec mapping, then if
			 * this was the last mapping, discard the contents,
			 * otherwise sync the i-cache for this page.
			 */
			if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) {
				if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
					pg->mdpage.pvh_attrs &= ~PVF_EXEC;
					PMAPCOUNT(exec_discarded_unmap);
				} else {
					pmap_syncicache_page(pg);
					PMAPCOUNT(exec_synced_unmap);
				}
			}
#endif /* PMAP_CACHE_VIPT */
			break;
		}
		prevptr = &SLIST_NEXT(pv, pv_link);	/* previous pointer */
		pv = *prevptr;				/* advance */
	}

#ifdef PMAP_CACHE_VIPT
	/*
	 * If we no longer have a WRITEABLE KENTRY at the head of list,
	 * clear the KMOD attribute from the page.
	 */
	if (SLIST_FIRST(&pg->mdpage.pvh_list) == NULL
	    || (SLIST_FIRST(&pg->mdpage.pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
		pg->mdpage.pvh_attrs &= ~PVF_KMOD;

	/*
	 * If this was a writeable page and there are no more writeable
	 * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
	 * the contents to memory.
	 */
	if (pg->mdpage.krw_mappings + pg->mdpage.urw_mappings == 0)
		pg->mdpage.pvh_attrs &= ~PVF_WRITE;
	KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif /* PMAP_CACHE_VIPT */

	return (pv);				/* return removed pv */
}
/*
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */
static u_int
pmap_modify_pv(struct vm_page *pg, pmap_t pm, vaddr_t va,
    u_int clr_mask, u_int set_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;

	KASSERT((clr_mask & PVF_KENTRY) == 0);
	KASSERT((set_mask & PVF_KENTRY) == 0);

	if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
		return (0);

	NPDEBUG(PDB_PVDUMP,
	    printf("pmap_modify_pv: pm %p, pg %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, pg, clr_mask, set_mask, npv->pv_flags));

	/*
	 * There is at least one VA mapping this page.
	 */

	if (clr_mask & (PVF_REF | PVF_MOD)) {
		pg->mdpage.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
#ifdef PMAP_CACHE_VIPT
		if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
			pg->mdpage.pvh_attrs |= PVF_DIRTY;
		KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif
	}

	oflags = npv->pv_flags;
	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;

	if ((flags ^ oflags) & PVF_WIRED) {
		if (flags & PVF_WIRED)
			++pm->pm_stats.wired_count;
		else
			--pm->pm_stats.wired_count;
	}

	if ((flags ^ oflags) & PVF_WRITE) {
		if (pm == pmap_kernel()) {
			if (flags & PVF_WRITE) {
				pg->mdpage.krw_mappings++;
				pg->mdpage.kro_mappings--;
			} else {
				pg->mdpage.kro_mappings++;
				pg->mdpage.krw_mappings--;
			}
		} else
		if (flags & PVF_WRITE) {
			pg->mdpage.urw_mappings++;
			pg->mdpage.uro_mappings--;
		} else {
			pg->mdpage.uro_mappings++;
			pg->mdpage.urw_mappings--;
		}
	}
#ifdef PMAP_CACHE_VIPT
	if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
		pg->mdpage.pvh_attrs &= ~PVF_WRITE;
	/*
	 * We have two cases here: the first is from enter_pv (new exec
	 * page), the second is a combined pmap_remove_pv/pmap_enter_pv.
	 * Since in the latter, pmap_enter_pv won't do anything, we just
	 * have to do what pmap_remove_pv would do.
	 */
	if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
	    || (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)
	    || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) {
		pmap_syncicache_page(pg);
		PMAPCOUNT(exec_synced_remap);
	}
	KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
#endif

	PMAPCOUNT(remappings);

	return (oflags);
}
/*
 * Allocate an L1 translation table for the specified pmap.
 * This is called at pmap creation time.
 */
static void
pmap_alloc_l1(pmap_t pm)
{
	struct l1_ttable *l1;
	u_int8_t domain;

	/*
	 * Remove the L1 at the head of the LRU list
	 */
	simple_lock(&l1_lru_lock);
	l1 = TAILQ_FIRST(&l1_lru_list);
	KDASSERT(l1 != NULL);
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Pick the first available domain number, and update
	 * the link to the next number.
	 */
	domain = l1->l1_domain_first;
	l1->l1_domain_first = l1->l1_domain_free[domain];

	/*
	 * If there are still free domain numbers in this L1,
	 * put it back on the TAIL of the LRU list.
	 */
	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	simple_unlock(&l1_lru_lock);

	/*
	 * Fix up the relevant bits in the pmap structure
	 */
	pm->pm_l1 = l1;
	pm->pm_domain = domain;
}
/*
 * Free an L1 translation table.
 * This is called at pmap destruction time.
 */
static void
pmap_free_l1(pmap_t pm)
{
	struct l1_ttable *l1 = pm->pm_l1;

	simple_lock(&l1_lru_lock);

	/*
	 * If this L1 is currently on the LRU list, remove it.
	 */
	if (l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Free up the domain number which was allocated to the pmap
	 */
	l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first;
	l1->l1_domain_first = pm->pm_domain;
	l1->l1_domain_use_count--;

	/*
	 * The L1 now must have at least 1 free domain, so add
	 * it back to the LRU list. If the use count is zero,
	 * put it at the head of the list, otherwise it goes
	 * to the tail.
	 */
	if (l1->l1_domain_use_count == 0)
		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
	else
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	simple_unlock(&l1_lru_lock);
}
static inline void
pmap_use_l1(pmap_t pm)
{
	struct l1_ttable *l1;

	/*
	 * Do nothing if we're in interrupt context.
	 * Access to an L1 by the kernel pmap must not affect
	 * the LRU list.
	 */
	if (cpu_intr_p() || pm == pmap_kernel())
		return;

	l1 = pm->pm_l1;

	/*
	 * If the L1 is not currently on the LRU list, just return
	 */
	if (l1->l1_domain_use_count == PMAP_DOMAINS)
		return;

	simple_lock(&l1_lru_lock);

	/*
	 * Check the use count again, now that we've acquired the lock
	 */
	if (l1->l1_domain_use_count == PMAP_DOMAINS) {
		simple_unlock(&l1_lru_lock);
		return;
	}

	/*
	 * Move the L1 to the back of the LRU list
	 */
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	simple_unlock(&l1_lru_lock);
}
/*
 * void pmap_free_l2_ptp(pt_entry_t *, paddr_t)
 *
 * Free an L2 descriptor table.
 */
static inline void
#ifndef PMAP_INCLUDE_PTE_SYNC
pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
#else
pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa)
#endif
{
#ifdef PMAP_INCLUDE_PTE_SYNC
#ifdef PMAP_CACHE_VIVT
	/*
	 * Note: With a write-back cache, we may need to sync this
	 * L2 table before re-using it.
	 * This is because it may have belonged to a non-current
	 * pmap, in which case the cache syncs would have been
	 * skipped for the pages that were being unmapped. If the
	 * L2 table were then to be immediately re-allocated to
	 * the *current* pmap, it may well contain stale mappings
	 * which have not yet been cleared by a cache write-back
	 * and so would still be visible to the mmu.
	 */
	if (need_sync)
		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
#endif /* PMAP_CACHE_VIVT */
#endif /* PMAP_INCLUDE_PTE_SYNC */
	pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
}
/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static inline struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vaddr_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
	    (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
		return (NULL);

	return (l2b);
}
/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		if ((l2 = pmap_alloc_l2_dtable()) == NULL)
			return (NULL);

		/*
		 * Link it into the parent pmap
		 */
		pm->pm_l2[L2_IDX(l1idx)] = l2;
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time. We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			if (l2->l2_occupancy == 0) {
				pm->pm_l2[L2_IDX(l1idx)] = NULL;
				pmap_free_l2_dtable(l2);
			}
			return (NULL);
		}

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
	}

	return (l2b);
}
/*
 * One or more mappings in the specified L2 descriptor table have just been
 * invalidated.
 *
 * Garbage collect the metadata and descriptor table itself if necessary.
 *
 * The pmap lock must be acquired when this is called (not necessary
 * for the kernel pmap).
 */
static void
pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;

	KDASSERT(count <= l2b->l2b_occupancy);

	/*
	 * Update the bucket's reference count according to how many
	 * PTEs the caller has just invalidated.
	 */
	l2b->l2b_occupancy -= count;

	/*
	 * Note:
	 *
	 * Level 2 page tables allocated to the kernel pmap are never freed
	 * as that would require checking all Level 1 page tables and
	 * removing any references to the Level 2 page table. See also the
	 * comment elsewhere about never freeing bootstrap L2 descriptors.
	 *
	 * We make do with just invalidating the mapping in the L2 table.
	 *
	 * This isn't really a big deal in practice and, in fact, leads
	 * to a performance win over time as we don't need to continually
	 * alloc/free.
	 */
	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
		return;

	/*
	 * There are no more valid mappings in this level 2 page table.
	 * Go ahead and NULL-out the pointer in the bucket, then
	 * free the page table.
	 */
	l1idx = l2b->l2b_l1idx;
	ptep = l2b->l2b_kva;
	l2b->l2b_kva = NULL;

	pl1pd = &pm->pm_l1->l1_kva[l1idx];

	/*
	 * If the L1 slot matches the pmap's domain
	 * number, then invalidate it.
	 */
	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
	if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
		*pl1pd = 0;
		PTE_SYNC(pl1pd);
	}

	/*
	 * Release the L2 descriptor table back to the pool cache.
	 */
#ifndef PMAP_INCLUDE_PTE_SYNC
	pmap_free_l2_ptp(ptep, l2b->l2b_phys);
#else
	pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys);
#endif

	/*
	 * Update the reference count in the associated l2_dtable
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (--l2->l2_occupancy > 0)
		return;

	/*
	 * There are no more valid mappings in any of the Level 1
	 * slots managed by this l2_dtable. Go ahead and NULL-out
	 * the pointer in the parent pmap and free the l2_dtable.
	 */
	pm->pm_l2[L2_IDX(l1idx)] = NULL;
	pmap_free_l2_dtable(l2);
}
/*
 * Pool cache constructors for L2 descriptor tables, metadata and pmap
 * structures.
 */
static int
pmap_l2ptp_ctor(void *arg, void *v, int flags)
{
#ifndef PMAP_INCLUDE_PTE_SYNC
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vaddr_t va = (vaddr_t)v & ~PGOFSET;

	/*
	 * The mappings for these page tables were initially made using
	 * pmap_kenter_pa() by the pool subsystem. Therefore, the cache-
	 * mode will not be right for page table mappings. To avoid
	 * polluting the pmap_kenter_pa() code with a special case for
	 * page tables, we simply fix up the cache-mode here if it's not
	 * correct.
	 */
	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
	KDASSERT(l2b != NULL);
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;

	if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
		/*
		 * Page tables must have the cache-mode set to Write-Thru.
		 */
		*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
		PTE_SYNC(ptep);
		cpu_tlb_flushD_SE(va);
		cpu_cpwait();
	}
#endif

	memset(v, 0, L2_TABLE_SIZE_REAL);
	PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
	return (0);
}
static int
pmap_l2dtable_ctor(void *arg, void *v, int flags)
{

	memset(v, 0, sizeof(struct l2_dtable));
	return (0);
}

static int
pmap_pmap_ctor(void *arg, void *v, int flags)
{

	memset(v, 0, sizeof(struct pmap));
	return (0);
}
static void
pmap_pinit(pmap_t pm)
{
	struct l2_bucket *l2b;

	if (vector_page < KERNEL_BASE) {
		/*
		 * Map the vector page.
		 */
		pmap_enter(pm, vector_page, systempage.pv_pa,
		    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
		pmap_update(pm);

		pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
		l2b = pmap_get_l2_bucket(pm, vector_page);
		KDASSERT(l2b != NULL);
		pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
		    L1_C_DOM(pm->pm_domain);
	} else
		pm->pm_pl1vec = NULL;
}
#ifdef PMAP_CACHE_VIVT
/*
 * Since we have a virtually indexed cache, we may need to inhibit caching if
 * there is more than one mapping and at least one of them is writable.
 * Since we purge the cache on every context switch, we only need to check for
 * other mappings within the same pmap, or kernel_pmap.
 * This function is also called when a page is unmapped, to possibly reenable
 * caching on any remaining mappings.
 *
 * The code implements the following logic, where:
 *
 * KW = # of kernel read/write pages
 * KR = # of kernel read only pages
 * UW = # of user read/write pages
 * UR = # of user read only pages
 *
 * KC = kernel mapping is cacheable
 * UC = user mapping is cacheable
 *
 *               KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
 *             +---------------------------------------------
 * UW=0,UR=0   |    ---        KC=1       KC=1       KC=0
 * UW=0,UR>0   |    UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
 * UW=1,UR=0   |    UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
 * UW>1,UR>=0  |    UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
 */
static const int pmap_vac_flags[4][4] = {
	{-1,		0,		0,		PVF_KNC},
	{0,		0,		PVF_NC,		PVF_NC},
	{0,		PVF_NC,		PVF_NC,		PVF_NC},
	{PVF_UNC,	PVF_NC,		PVF_NC,		PVF_NC}
};
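
/*
 * Index encoding for the table above (see pmap_get_vac_flags below):
 * bit 0 of kidx/uidx is set when there are read-only mappings or more
 * than one read/write mapping; bit 1 is set when there is at least one
 * read/write mapping.  Example: a single kernel r/w mapping and nothing
 * else gives kidx == 2, uidx == 0, so pmap_vac_flags[0][2] == 0 and the
 * mapping stays cacheable (the KW=1,KR=0 column).
 */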
static inline int
pmap_get_vac_flags(const struct vm_page *pg)
{
	int kidx, uidx;

	kidx = 0;
	if (pg->mdpage.kro_mappings || pg->mdpage.krw_mappings > 1)
		kidx |= 1;
	if (pg->mdpage.krw_mappings)
		kidx |= 2;

	uidx = 0;
	if (pg->mdpage.uro_mappings || pg->mdpage.urw_mappings > 1)
		uidx |= 1;
	if (pg->mdpage.urw_mappings)
		uidx |= 2;

	return (pmap_vac_flags[uidx][kidx]);
}
static void
pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
	int nattr;

	nattr = pmap_get_vac_flags(pg);

	if (nattr < 0) {
		pg->mdpage.pvh_attrs &= ~PVF_NC;
		return;
	}

	if (nattr == 0 && (pg->mdpage.pvh_attrs & PVF_NC) == 0)
		return;

	if (pm == pmap_kernel())
		pmap_vac_me_kpmap(pg, pm, va);
	else
		pmap_vac_me_user(pg, pm, va);

	pg->mdpage.pvh_attrs = (pg->mdpage.pvh_attrs & ~PVF_NC) | nattr;
}
static void
pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
	u_int u_cacheable, u_entries;
	struct pv_entry *pv;
	pmap_t last_pmap = pm;

	/*
	 * Pass one, see if there are both kernel and user pmaps for
	 * this page. Calculate whether there are user-writable or
	 * kernel-writable pages.
	 */
	u_cacheable = 0;
	SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
		if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
			u_cacheable++;
	}

	u_entries = pg->mdpage.urw_mappings + pg->mdpage.uro_mappings;

	/*
	 * We know we have just been updating a kernel entry, so if
	 * all user pages are already cacheable, then there is nothing
	 * further to do.
	 */
	if (pg->mdpage.k_mappings == 0 && u_cacheable == u_entries)
		return;

	if (u_entries) {
		/*
		 * Scan over the list again, for each entry, if it
		 * might not be set correctly, call pmap_vac_me_user
		 * to recalculate the settings.
		 */
		SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
			/*
			 * We know kernel mappings will get set
			 * correctly in other calls. We also know
			 * that if the pmap is the same as last_pmap
			 * then we've just handled this entry.
			 */
			if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
				continue;

			/*
			 * If there are kernel entries and this page
			 * is writable but non-cacheable, then we can
			 * skip this entry also.
			 */
			if (pg->mdpage.k_mappings &&
			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
			    (PVF_NC | PVF_WRITE))
				continue;

			/*
			 * Similarly if there are no kernel-writable
			 * entries and the page is already
			 * read-only/cacheable.
			 */
			if (pg->mdpage.krw_mappings == 0 &&
			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
				continue;

			/*
			 * For some of the remaining cases, we know
			 * that we must recalculate, but for others we
			 * can't tell if they are correct or not, so
			 * we recalculate anyway.
			 */
			pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0);
		}

		if (pg->mdpage.k_mappings == 0)
			return;
	}

	pmap_vac_me_user(pg, pm, va);
}
static void
pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
	pmap_t kpmap = pmap_kernel();
	struct pv_entry *pv, *npv = NULL;
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	u_int entries = 0;
	u_int writable = 0;
	u_int cacheable_entries = 0;
	u_int kern_cacheable = 0;
	u_int other_writable = 0;

	/*
	 * Count mappings and writable mappings in this pmap.
	 * Include kernel mappings as part of our own.
	 * Keep a pointer to the first one.
	 */
	SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
		/* Count mappings in the same pmap */
		if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
			if (entries++ == 0)
				npv = pv;

			/* Cacheable mappings */
			if ((pv->pv_flags & PVF_NC) == 0) {
				cacheable_entries++;
				if (kpmap == pv->pv_pmap)
					kern_cacheable++;
			}

			/* Writable mappings */
			if (pv->pv_flags & PVF_WRITE)
				++writable;
		} else
		if (pv->pv_flags & PVF_WRITE)
			other_writable = 1;
	}

	/*
	 * Enable or disable caching as necessary.
	 * Note: the first entry might be part of the kernel pmap,
	 * so we can't assume this is indicative of the state of the
	 * other (maybe non-kpmap) entries.
	 */
	if ((entries > 1 && writable) ||
	    (entries > 0 && pm == kpmap && other_writable)) {
		if (cacheable_entries == 0)
			return;

		for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
			if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
			    (pv->pv_flags & PVF_NC))
				continue;

			pv->pv_flags |= PVF_NC;

			l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
			KDASSERT(l2b != NULL);
			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
			pte = *ptep & ~L2_S_CACHE_MASK;

			if ((va != pv->pv_va || pm != pv->pv_pmap) &&
			    l2pte_valid(pte)) {
				if (PV_BEEN_EXECD(pv->pv_flags)) {
#ifdef PMAP_CACHE_VIVT
					pmap_idcache_wbinv_range(pv->pv_pmap,
					    pv->pv_va, PAGE_SIZE);
#endif
					pmap_tlb_flushID_SE(pv->pv_pmap,
					    pv->pv_va);
				} else
				if (PV_BEEN_REFD(pv->pv_flags)) {
#ifdef PMAP_CACHE_VIVT
					pmap_dcache_wb_range(pv->pv_pmap,
					    pv->pv_va, PAGE_SIZE, true,
					    (pv->pv_flags & PVF_WRITE) == 0);
#endif
					pmap_tlb_flushD_SE(pv->pv_pmap,
					    pv->pv_va);
				}
			}

			*ptep = pte;
			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
		}
	} else
	if (entries > cacheable_entries) {
		/*
		 * Turn caching back on for some pages. If it is a kernel
		 * page, only do so if there are no other writable pages.
		 */
		for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
			if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
			    (kpmap != pv->pv_pmap || other_writable)))
				continue;

			pv->pv_flags &= ~PVF_NC;

			l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
			KDASSERT(l2b != NULL);
			ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
			pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;

			if (l2pte_valid(pte)) {
				if (PV_BEEN_EXECD(pv->pv_flags)) {
					pmap_tlb_flushID_SE(pv->pv_pmap,
					    pv->pv_va);
				} else
				if (PV_BEEN_REFD(pv->pv_flags)) {
					pmap_tlb_flushD_SE(pv->pv_pmap,
					    pv->pv_va);
				}
			}

			*ptep = pte;
			PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
		}
	}
}
#endif /* PMAP_CACHE_VIVT */
#ifdef PMAP_CACHE_VIPT
static void
pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
{
	struct pv_entry *pv;
	vaddr_t tst_mask;
	bool bad_alias;
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte, opte;
	const u_int
	    rw_mappings = pg->mdpage.urw_mappings + pg->mdpage.krw_mappings,
	    ro_mappings = pg->mdpage.uro_mappings + pg->mdpage.kro_mappings;

	/* do we need to do anything? */
	if (arm_cache_prefer_mask == 0)
		return;

	NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: pg=%p, pmap=%p va=%08lx\n",
	    pg, pm, va));

	KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));

	/* Already a conflict? */
	if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
		/* just an add, things are already non-cached */
		KASSERT(!(pg->mdpage.pvh_attrs & PVF_DIRTY));
		KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
		bad_alias = false;
		if (va) {
			PMAPCOUNT(vac_color_none);
			bad_alias = true;
			KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
			goto fixup;
		}
		pv = SLIST_FIRST(&pg->mdpage.pvh_list);
		/* the list can't be empty because it would be cacheable */
		if (pg->mdpage.pvh_attrs & PVF_KMPAGE) {
			tst_mask = pg->mdpage.pvh_attrs;
		} else {
			tst_mask = pv->pv_va;
			pv = SLIST_NEXT(pv, pv_link);
		}
		/*
		 * Only check for a bad alias if we have writable mappings.
		 */
		tst_mask &= arm_cache_prefer_mask;
		if (rw_mappings > 0 && arm_cache_prefer_mask) {
			for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
				/* if there's a bad alias, stop checking. */
				if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
					bad_alias = true;
			}
			pg->mdpage.pvh_attrs |= PVF_WRITE;
			if (!bad_alias)
				pg->mdpage.pvh_attrs |= PVF_DIRTY;
		} else {
			/*
			 * We have only read-only mappings.  Let's see if there
			 * are multiple colors in use or if we mapped a KMPAGE.
			 * If the latter, we have a bad alias.  If the former,
			 * we need to remember that.
			 */
			for (; pv; pv = SLIST_NEXT(pv, pv_link)) {
				if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) {
					if (pg->mdpage.pvh_attrs & PVF_KMPAGE)
						bad_alias = true;
					break;
				}
			}
			pg->mdpage.pvh_attrs &= ~PVF_WRITE;
			/*
			 * No KMPAGE and we exited early, so we must have
			 * multiple color mappings.
			 */
			if (!bad_alias && pv != NULL)
				pg->mdpage.pvh_attrs |= PVF_MULTCLR;
		}

		/* If no conflicting colors, set everything back to cached */
		if (!bad_alias) {
#ifdef DEBUG
			if ((pg->mdpage.pvh_attrs & PVF_WRITE)
			    || ro_mappings < 2) {
				SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link)
					KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
			}
#endif
			pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
			pg->mdpage.pvh_attrs |= tst_mask | PVF_COLORED;
			/*
			 * Restore DIRTY bit if page is modified
			 */
			if (pg->mdpage.pvh_attrs & PVF_DMOD)
				pg->mdpage.pvh_attrs |= PVF_DIRTY;
			PMAPCOUNT(vac_color_restore);
		} else {
			KASSERT(SLIST_FIRST(&pg->mdpage.pvh_list) != NULL);
			KASSERT(SLIST_NEXT(SLIST_FIRST(&pg->mdpage.pvh_list), pv_link) != NULL);
		}
		KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
		KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
	} else if (!va) {
		KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(pg));
		KASSERT(!(pg->mdpage.pvh_attrs & PVF_WRITE)
		    || (pg->mdpage.pvh_attrs & PVF_DIRTY));
		if (rw_mappings == 0) {
			pg->mdpage.pvh_attrs &= ~PVF_WRITE;
			if (ro_mappings == 1
			    && (pg->mdpage.pvh_attrs & PVF_MULTCLR)) {
				/*
				 * If this is the last readonly mapping
				 * but it doesn't match the current color
				 * for the page, change the current color
				 * to match this last readonly mapping.
				 */
				pv = SLIST_FIRST(&pg->mdpage.pvh_list);
				tst_mask = (pg->mdpage.pvh_attrs ^ pv->pv_va)
				    & arm_cache_prefer_mask;
				if (tst_mask) {
					pg->mdpage.pvh_attrs ^= tst_mask;
					PMAPCOUNT(vac_color_change);
				}
			}
		}
		KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
		KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
		return;
	} else if (!pmap_is_page_colored_p(pg)) {
		/* not colored so we just use its color */
		KASSERT(pg->mdpage.pvh_attrs & (PVF_WRITE|PVF_DIRTY));
		KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
		PMAPCOUNT(vac_color_new);
		pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
		pg->mdpage.pvh_attrs |= PVF_COLORED
		    | (va & arm_cache_prefer_mask)
		    | (rw_mappings > 0 ? PVF_WRITE : 0);
		KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
		KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
		return;
	} else if (((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
		bad_alias = false;
		if (rw_mappings > 0) {
			/*
			 * We now have writeable mappings and if we have
			 * readonly mappings in more than one color, we have
			 * an aliasing problem.  Regardless mark the page as
			 * writeable.
			 */
			if (pg->mdpage.pvh_attrs & PVF_MULTCLR) {
				if (ro_mappings < 2) {
					/*
					 * If we only have less than two
					 * read-only mappings, just flush the
					 * non-primary colors from the cache.
					 */
					pmap_flush_page(pg,
					    PMAP_FLUSH_SECONDARY);
				} else {
					bad_alias = true;
				}
			}
			pg->mdpage.pvh_attrs |= PVF_WRITE;
		}
		/* If no conflicting colors, set everything back to cached */
		if (!bad_alias) {
#ifdef DEBUG
			if (rw_mappings > 0
			    || (pg->mdpage.pvh_attrs & PMAP_KMPAGE)) {
				tst_mask = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
				SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link)
					KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
			}
#endif
			if (SLIST_EMPTY(&pg->mdpage.pvh_list))
				PMAPCOUNT(vac_color_reuse);
			else
				PMAPCOUNT(vac_color_ok);

			/* matching color, just return */
			KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
			KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
			return;
		}
		KASSERT(SLIST_FIRST(&pg->mdpage.pvh_list) != NULL);
		KASSERT(SLIST_NEXT(SLIST_FIRST(&pg->mdpage.pvh_list), pv_link) != NULL);

		/* color conflict.  evict from cache. */

		pmap_flush_page(pg, PMAP_FLUSH_PRIMARY);
		pg->mdpage.pvh_attrs &= ~PVF_COLORED;
		pg->mdpage.pvh_attrs |= PVF_NC;
		KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
		KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
		PMAPCOUNT(vac_color_erase);
	} else if (rw_mappings == 0
		   && (pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
		KASSERT((pg->mdpage.pvh_attrs & PVF_WRITE) == 0);

		/*
		 * If the page has dirty cache lines, clean it.
		 */
		if (pg->mdpage.pvh_attrs & PVF_DIRTY)
			pmap_flush_page(pg, PMAP_CLEAN_PRIMARY);

		/*
		 * If this is the first remapping (we know that there are no
		 * writeable mappings), then this is a simple color change.
		 * Otherwise this is a secondary r/o mapping, which means
		 * we don't have to do anything.
		 */
		if (ro_mappings == 1) {
			KASSERT(((pg->mdpage.pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
			pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
			pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
			PMAPCOUNT(vac_color_change);
		} else {
			PMAPCOUNT(vac_color_blind);
		}
		pg->mdpage.pvh_attrs |= PVF_MULTCLR;
		KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
		KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
		return;
	} else {
		if (rw_mappings > 0)
			pg->mdpage.pvh_attrs |= PVF_WRITE;

		/* color conflict.  evict from cache. */
		pmap_flush_page(pg, PMAP_FLUSH_PRIMARY);

		/* the list can't be empty because this was an enter/modify */
		pv = SLIST_FIRST(&pg->mdpage.pvh_list);
		if ((pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
			/*
			 * If there's only one mapped page, change color to the
			 * page's new color and return.  Restore the DIRTY bit
			 * that was erased by pmap_flush_page.
			 */
			if (SLIST_NEXT(pv, pv_link) == NULL) {
				pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
				pg->mdpage.pvh_attrs |= (va & arm_cache_prefer_mask);
				if (pg->mdpage.pvh_attrs & PVF_DMOD)
					pg->mdpage.pvh_attrs |= PVF_DIRTY;
				PMAPCOUNT(vac_color_change);
				KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
				KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
				KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
				return;
			}
		}
		bad_alias = true;
		pg->mdpage.pvh_attrs &= ~PVF_COLORED;
		pg->mdpage.pvh_attrs |= PVF_NC;
		PMAPCOUNT(vac_color_erase);
		KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
	}

  fixup:
	KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));

	/*
	 * Turn caching on/off for all pages.
	 */
	SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
		l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
		KDASSERT(l2b != NULL);
		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
		opte = *ptep;
		pte = opte & ~L2_S_CACHE_MASK;
		if (bad_alias) {
			pv->pv_flags |= PVF_NC;
		} else {
			pv->pv_flags &= ~PVF_NC;
			pte |= pte_l2_s_cache_mode;
		}

		if (opte == pte)	/* only update if there's a change */
			continue;

		if (l2pte_valid(pte)) {
			if (PV_BEEN_EXECD(pv->pv_flags
)) {
2104 pmap_tlb_flushID_SE(pv
->pv_pmap
, pv
->pv_va
);
2105 } else if (PV_BEEN_REFD(pv
->pv_flags
)) {
2106 pmap_tlb_flushD_SE(pv
->pv_pmap
, pv
->pv_va
);
2111 PTE_SYNC_CURRENT(pv
->pv_pmap
, ptep
);
2114 #endif /* PMAP_CACHE_VIPT */
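
/*
 * Informal summary of the VIPT page-color state tracked above (derived
 * from the code, kept here as a reading aid):
 *
 *	PVF_COLORED	page is cached at a single color, recorded in
 *			(pvh_attrs & arm_cache_prefer_mask)
 *	PVF_MULTCLR	read-only mappings exist at more than one color
 *	PVF_NC		page made non-cacheable: writeable mappings
 *			conflict in color
 *
 * The vac_color_* counters record the transitions, e.g. vac_color_change
 * for a simple recolor and vac_color_erase when the page must be flushed
 * and marked non-cacheable.
 */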
/*
 * Modify pte bits for all ptes corresponding to the given physical address.
 * We use `maskbits' rather than `clearbits' because we're always passing
 * constants and the latter would require an extra inversion at run-time.
 */
static void
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
	struct l2_bucket *l2b;
	struct pv_entry *pv;
	pt_entry_t *ptep, npte, opte;
	pmap_t pm;
	vaddr_t va;
	u_int oflags;
#ifdef PMAP_CACHE_VIPT
	const bool want_syncicache = PV_IS_EXEC_P(pg->mdpage.pvh_attrs);
	bool need_syncicache = false;
	bool did_syncicache = false;
	bool need_vac_me_harder = false;
#endif

	NPDEBUG(PDB_BITS,
	    printf("pmap_clearbit: pg %p (0x%08lx) mask 0x%x\n",
	    pg, VM_PAGE_TO_PHYS(pg), maskbits));

	PMAP_HEAD_TO_MAP_LOCK();
	simple_lock(&pg->mdpage.pvh_slock);

#ifdef PMAP_CACHE_VIPT
	/*
	 * If we might want to sync the I-cache and we've modified it,
	 * then we know we definitely need to sync or discard it.
	 */
	if (want_syncicache)
		need_syncicache = pg->mdpage.pvh_attrs & PVF_MOD;
#endif
	/*
	 * Clear saved attributes (modify, reference)
	 */
	pg->mdpage.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));

	if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
#ifdef PMAP_CACHE_VIPT
		if (need_syncicache) {
			/*
			 * No one has it mapped, so just discard it.  The next
			 * exec remapping will cause it to be synced.
			 */
			pg->mdpage.pvh_attrs &= ~PVF_EXEC;
			PMAPCOUNT(exec_discarded_clearbit);
		}
#endif
		simple_unlock(&pg->mdpage.pvh_slock);
		PMAP_HEAD_TO_MAP_UNLOCK();
		return;
	}

	/*
	 * Loop over all current mappings setting/clearing as appropriate
	 */
	SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
		va = pv->pv_va;
		pm = pv->pv_pmap;
		oflags = pv->pv_flags;
		/*
		 * Kernel entries are unmanaged and as such not to be changed.
		 */
		if (oflags & PVF_KENTRY)
			continue;
		pv->pv_flags &= ~maskbits;

		pmap_acquire_pmap_lock(pm);

		l2b = pmap_get_l2_bucket(pm, va);
		KDASSERT(l2b != NULL);

		ptep = &l2b->l2b_kva[l2pte_index(va)];
		npte = opte = *ptep;

		NPDEBUG(PDB_BITS,
		    printf(
		    "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n",
		    pv, pv->pv_pmap, pv->pv_va, oflags));

		if (maskbits & (PVF_WRITE|PVF_MOD)) {
#ifdef PMAP_CACHE_VIVT
			if ((pv->pv_flags & PVF_NC)) {
				/*
				 * Entry is not cacheable:
				 *
				 * Don't turn caching on again if this is a
				 * modified emulation. This would be
				 * inconsistent with the settings created by
				 * pmap_vac_me_harder(). Otherwise, it's safe
				 * to re-enable caching.
				 *
				 * There's no need to call pmap_vac_me_harder()
				 * here: all pages are losing their write
				 * permission.
				 */
				if (maskbits & PVF_WRITE) {
					npte |= pte_l2_s_cache_mode;
					pv->pv_flags &= ~PVF_NC;
				}
			} else
			if (opte & L2_S_PROT_W) {
				/*
				 * Entry is writable/cacheable: check if the
				 * pmap is current; if it is, flush it,
				 * otherwise it won't be in the cache.
				 */
				if (PV_BEEN_EXECD(oflags))
					pmap_idcache_wbinv_range(pm, pv->pv_va,
					    PAGE_SIZE);
				else
				if (PV_BEEN_REFD(oflags))
					pmap_dcache_wb_range(pm, pv->pv_va,
					    PAGE_SIZE,
					    (maskbits & PVF_REF) != 0, false);
			}
#endif

			/* make the pte read only */
			npte &= ~L2_S_PROT_W;

			if (maskbits & oflags & PVF_WRITE) {
				/*
				 * Keep alias accounting up to date
				 */
				if (pv->pv_pmap == pmap_kernel()) {
					pg->mdpage.krw_mappings--;
					pg->mdpage.kro_mappings++;
				} else {
					pg->mdpage.urw_mappings--;
					pg->mdpage.uro_mappings++;
				}
#ifdef PMAP_CACHE_VIPT
				if (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0)
					pg->mdpage.pvh_attrs &= ~PVF_WRITE;
				if (want_syncicache)
					need_syncicache = true;
				need_vac_me_harder = true;
#endif
			}
		}

		if (maskbits & PVF_REF) {
			if ((pv->pv_flags & PVF_NC) == 0 &&
			    (maskbits & (PVF_WRITE|PVF_MOD)) == 0 &&
			    l2pte_valid(npte)) {
#ifdef PMAP_CACHE_VIVT
				/*
				 * Check npte here; we may have already
				 * done the wbinv above, and the validity
				 * of the PTE is the same for opte and
				 * npte.
				 */
				/* XXXJRT need idcache_inv_range */
				if (PV_BEEN_EXECD(oflags))
					pmap_idcache_wbinv_range(pm,
					    pv->pv_va, PAGE_SIZE);
				else
				if (PV_BEEN_REFD(oflags))
					pmap_dcache_wb_range(pm,
					    pv->pv_va, PAGE_SIZE,
					    true, true);
#endif
			}

			/*
			 * Make the PTE invalid so that we will take a
			 * page fault the next time the mapping is
			 * referenced.
			 */
			npte &= ~L2_TYPE_MASK;
			npte |= L2_TYPE_INV;
		}

		if (npte != opte) {
			*ptep = npte;
			PTE_SYNC(ptep);
			/* Flush the TLB entry if a current pmap. */
			if (PV_BEEN_EXECD(oflags))
				pmap_tlb_flushID_SE(pm, pv->pv_va);
			else
			if (PV_BEEN_REFD(oflags))
				pmap_tlb_flushD_SE(pm, pv->pv_va);
		}

		pmap_release_pmap_lock(pm);

		NPDEBUG(PDB_BITS,
		    printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n",
		    pm, va, opte, npte));
	}

#ifdef PMAP_CACHE_VIPT
	/*
	 * If we need to sync the I-cache and we haven't done it yet, do it.
	 */
	if (need_syncicache && !did_syncicache) {
		pmap_syncicache_page(pg);
		PMAPCOUNT(exec_synced_clearbit);
	}
	/*
	 * If we are changing this to read-only, we need to call vac_me_harder
	 * so we can change all the read-only pages to cacheable.  We pretend
	 * this as a page deletion.
	 */
	if (need_vac_me_harder) {
		if (pg->mdpage.pvh_attrs & PVF_NC)
			pmap_vac_me_harder(pg, NULL, 0);
	}
#endif

	simple_unlock(&pg->mdpage.pvh_slock);
	PMAP_HEAD_TO_MAP_UNLOCK();
}
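
/*
 * For reference, the constant masks passed to pmap_clearbit() by its
 * callers later in this file:
 *
 *	pmap_clear_modify()	-> pmap_clearbit(pg, PVF_MOD)
 *	pmap_clear_reference()	-> pmap_clearbit(pg, PVF_REF)
 *	pmap_page_protect()	-> pmap_clearbit(pg, PVF_WRITE), et al.
 */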
/*
 * pmap_clean_page()
 *
 * This is a local function used to work out the best strategy to clean
 * a single page referenced by its entry in the PV table. It's used by
 * pmap_copy_page, pmap_zero_page and maybe some others later on.
 *
 * Its policy is effectively:
 *  o If there are no mappings, we don't bother doing anything with the cache.
 *  o If there is one mapping, we clean just that page.
 *  o If there are multiple mappings, we clean the entire cache.
 *
 * So that some functions can be further optimised, it returns 0 if it didn't
 * clean the entire cache, or 1 if it did.
 *
 * XXX One bug in this routine is that if the pv_entry has a single page
 * mapped at 0x00000000 a whole cache clean will be performed rather than
 * just the 1 page. This should not occur in everyday use and, if it does,
 * the result is merely a less efficient clean for the page.
 */
#ifdef PMAP_CACHE_VIVT
static int
pmap_clean_page(struct pv_entry *pv, bool is_src)
{
	pmap_t pm_to_clean = NULL;
	struct pv_entry *npv;
	u_int cache_needs_cleaning = 0;
	u_int flags = 0;
	vaddr_t page_to_clean = 0;

	if (pv == NULL)
		/* nothing mapped in so nothing to flush */
		return (0);

	/*
	 * Since we flush the cache each time we change to a different
	 * user vmspace, we only need to flush the page if it is in the
	 * current pmap.
	 */
	for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) {
		if (pmap_is_current(npv->pv_pmap)) {
			flags |= npv->pv_flags;
			/*
			 * The page is mapped non-cacheable in
			 * this map.  No need to flush the cache.
			 */
			if (npv->pv_flags & PVF_NC) {
				if (cache_needs_cleaning)
					panic("pmap_clean_page: "
					    "cache inconsistency");
				break;
			} else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
				continue;
			if (cache_needs_cleaning) {
				page_to_clean = 0;
				break;
			} else {
				page_to_clean = npv->pv_va;
				pm_to_clean = npv->pv_pmap;
			}
			cache_needs_cleaning = 1;
		}
	}

	if (page_to_clean) {
		if (PV_BEEN_EXECD(flags))
			pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
			    PAGE_SIZE);
		else
			pmap_dcache_wb_range(pm_to_clean, page_to_clean,
			    PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
	} else if (cache_needs_cleaning) {
		pmap_t const pm = curproc->p_vmspace->vm_map.pmap;

		if (PV_BEEN_EXECD(flags))
			pmap_idcache_wbinv_all(pm);
		else
			pmap_dcache_wbinv_all(pm);
		return (1);
	}
	return (0);
}
#endif /* PMAP_CACHE_VIVT */
#ifdef PMAP_CACHE_VIPT
/*
 * Sync a page with the I-cache.  Since this is a VIPT cache, we must pick
 * the right cache alias to make sure we flush the right stuff.
 */
static void
pmap_syncicache_page(struct vm_page *pg)
{
	const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
	pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];

	NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: pg=%p (attrs=%#x)\n",
	    pg, pg->mdpage.pvh_attrs));
	/*
	 * No need to clean the page if it's non-cached.
	 */
	if (pg->mdpage.pvh_attrs & PVF_NC)
		return;
	KASSERT(arm_cache_prefer_mask == 0 || pg->mdpage.pvh_attrs & PVF_COLORED);

	pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
	/*
	 * Set up a PTE with the right coloring to flush existing cache lines.
	 */
	*ptep = L2_S_PROTO |
	    VM_PAGE_TO_PHYS(pg)
	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
	    | pte_l2_s_cache_mode;
	PTE_SYNC(ptep);

	/*
	 * Flush it.
	 */
	cpu_icache_sync_range(cdstp + va_offset, PAGE_SIZE);

	/*
	 * Unmap the page.
	 */
	*ptep = 0;
	PTE_SYNC(ptep);
	pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);

	pg->mdpage.pvh_attrs |= PVF_EXEC;
	PMAPCOUNT(exec_synced);
}
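
/*
 * Note: va_offset above is the page's current color (pvh_attrs masked
 * with arm_cache_prefer_mask), so the temporary mapping at
 * cdstp + va_offset lands on the same cache alias as any existing cached
 * mapping of the page, and the I-cache sync hits the right lines.
 */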
static void
pmap_flush_page(struct vm_page *pg, enum pmap_flush_op flush)
{
	vsize_t va_offset, end_va;
	void (*cf)(vaddr_t, vsize_t);

	if (arm_cache_prefer_mask == 0)
		return;

	switch (flush) {
	case PMAP_FLUSH_PRIMARY:
		if (pg->mdpage.pvh_attrs & PVF_MULTCLR) {
			va_offset = 0;
			end_va = arm_cache_prefer_mask;
			pg->mdpage.pvh_attrs &= ~PVF_MULTCLR;
			PMAPCOUNT(vac_flush_lots);
		} else {
			va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
			end_va = va_offset;
			PMAPCOUNT(vac_flush_one);
		}
		/*
		 * Mark that the page is no longer dirty.
		 */
		pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
		cf = cpufuncs.cf_idcache_wbinv_range;
		break;
	case PMAP_FLUSH_SECONDARY:
		va_offset = 0;
		end_va = arm_cache_prefer_mask;
		cf = cpufuncs.cf_idcache_wbinv_range;
		pg->mdpage.pvh_attrs &= ~PVF_MULTCLR;
		PMAPCOUNT(vac_flush_lots);
		break;
	case PMAP_CLEAN_PRIMARY:
		va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
		end_va = va_offset;
		cf = cpufuncs.cf_dcache_wb_range;
		/*
		 * Mark that the page is no longer dirty.
		 */
		if ((pg->mdpage.pvh_attrs & PVF_DMOD) == 0)
			pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
		PMAPCOUNT(vac_clean_one);
		break;
	default:
		return;
	}

	KASSERT(!(pg->mdpage.pvh_attrs & PVF_NC));

	NPDEBUG(PDB_VAC, printf("pmap_flush_page: pg=%p (attrs=%#x)\n",
	    pg, pg->mdpage.pvh_attrs));

	for (; va_offset <= end_va; va_offset += PAGE_SIZE) {
		const size_t pte_offset = va_offset >> PGSHIFT;
		pt_entry_t * const ptep = &cdst_pte[pte_offset];
		const pt_entry_t oldpte = *ptep;

		if (flush == PMAP_FLUSH_SECONDARY
		    && va_offset == (pg->mdpage.pvh_attrs & arm_cache_prefer_mask))
			continue;

		pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
		/*
		 * Set up a PTE with the right coloring to flush
		 * existing cache entries.
		 */
		*ptep = L2_S_PROTO
		    | VM_PAGE_TO_PHYS(pg)
		    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
		    | pte_l2_s_cache_mode;
		PTE_SYNC(ptep);

		/*
		 * Flush it.
		 */
		(*cf)(cdstp + va_offset, PAGE_SIZE);

		/*
		 * Restore the page table entry since we might have interrupted
		 * pmap_zero_page or pmap_copy_page which was already using
		 * this pte.
		 */
		*ptep = oldpte;
		PTE_SYNC(ptep);
		pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
	}
}
#endif /* PMAP_CACHE_VIPT */
/*
 * Routine:	pmap_page_remove
 * Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 */
static void
pmap_page_remove(struct vm_page *pg)
{
	struct l2_bucket *l2b;
	struct pv_entry *pv, *npv, **pvp;
	pmap_t pm;
	pt_entry_t *ptep;
	bool flush;
	u_int flags;

	NPDEBUG(PDB_FOLLOW,
	    printf("pmap_page_remove: pg %p (0x%08lx)\n", pg,
	    VM_PAGE_TO_PHYS(pg)));

	PMAP_HEAD_TO_MAP_LOCK();
	simple_lock(&pg->mdpage.pvh_slock);

	pv = SLIST_FIRST(&pg->mdpage.pvh_list);
	if (pv == NULL) {
#ifdef PMAP_CACHE_VIPT
		/*
		 * We *know* the page contents are about to be replaced.
		 * Discard the exec contents
		 */
		if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
			PMAPCOUNT(exec_discarded_page_protect);
		pg->mdpage.pvh_attrs &= ~PVF_EXEC;
		KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
#endif
		simple_unlock(&pg->mdpage.pvh_slock);
		PMAP_HEAD_TO_MAP_UNLOCK();
		return;
	}
#ifdef PMAP_CACHE_VIPT
	KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(pg));
#endif

	/*
	 * Clear alias counts
	 */
#ifdef PMAP_CACHE_VIVT
	pg->mdpage.k_mappings = 0;
#endif
	pg->mdpage.urw_mappings = pg->mdpage.uro_mappings = 0;

	flush = false;
	flags = 0;

#ifdef PMAP_CACHE_VIVT
	pmap_clean_page(pv, false);
#endif

	pvp = &SLIST_FIRST(&pg->mdpage.pvh_list);
	while (pv) {
		pm = pv->pv_pmap;
		npv = SLIST_NEXT(pv, pv_link);
		if (flush == false && pmap_is_current(pm))
			flush = true;

		if (pm == pmap_kernel()) {
#ifdef PMAP_CACHE_VIPT
			/*
			 * If this was an unmanaged mapping, it must be
			 * preserved.  Move it back on the list and advance
			 * the end-of-list pointer.
			 */
			if (pv->pv_flags & PVF_KENTRY) {
				*pvp = pv;
				pvp = &SLIST_NEXT(pv, pv_link);
				pv = npv;
				continue;
			}
			if (pv->pv_flags & PVF_WRITE)
				pg->mdpage.krw_mappings--;
			else
				pg->mdpage.kro_mappings--;
#endif
			PMAPCOUNT(kernel_unmappings);
		}
		PMAPCOUNT(unmappings);

		pmap_acquire_pmap_lock(pm);

		l2b = pmap_get_l2_bucket(pm, pv->pv_va);
		KDASSERT(l2b != NULL);

		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];

		/*
		 * Update statistics
		 */
		--pm->pm_stats.resident_count;

		/* Wired bit */
		if (pv->pv_flags & PVF_WIRED)
			--pm->pm_stats.wired_count;

		flags |= pv->pv_flags;

		/*
		 * Invalidate the PTEs.
		 */
		*ptep = 0;
		PTE_SYNC_CURRENT(pm, ptep);
		pmap_free_l2_bucket(pm, l2b, 1);

		pool_put(&pmap_pv_pool, pv);
		pv = npv;
		/*
		 * if we reach the end of the list and there are still
		 * mappings, they might be able to be cached now.
		 */
		if (pv == NULL) {
			*pvp = NULL;
			if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
				pmap_vac_me_harder(pg, pm, 0);
		}
		pmap_release_pmap_lock(pm);
	}
#ifdef PMAP_CACHE_VIPT
	/*
	 * Its EXEC cache is now gone.
	 */
	if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs))
		PMAPCOUNT(exec_discarded_page_protect);
	pg->mdpage.pvh_attrs &= ~PVF_EXEC;
	KASSERT(pg->mdpage.urw_mappings == 0);
	KASSERT(pg->mdpage.uro_mappings == 0);
	if (pg->mdpage.krw_mappings == 0)
		pg->mdpage.pvh_attrs &= ~PVF_WRITE;
	KASSERT((pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
#endif
	simple_unlock(&pg->mdpage.pvh_slock);
	PMAP_HEAD_TO_MAP_UNLOCK();

	if (flush) {
		/*
		 * Note: We can't use pmap_tlb_flush{I,}D() here since that
		 * would need a subsequent call to pmap_update() to ensure
		 * curpm->pm_cstate.cs_all is reset. Our callers are not
		 * required to do that (see pmap(9)), so we can't modify
		 * the current pmap's state.
		 */
		if (PV_BEEN_EXECD(flags))
			cpu_tlb_flushID();
		else
			cpu_tlb_flushD();
	}
	cpu_cpwait();
}
/*
 * pmap_t pmap_create(void)
 *
 *      Create a new pmap structure from scratch.
 */
pmap_t
pmap_create(void)
{
	pmap_t pm;

	pm = pool_cache_get(&pmap_cache, PR_WAITOK);

	UVM_OBJ_INIT(&pm->pm_obj, NULL, 1);
	pm->pm_stats.wired_count = 0;
	pm->pm_stats.resident_count = 1;
	pm->pm_cstate.cs_all = 0;
	pmap_alloc_l1(pm);

	/*
	 * Note: The pool cache ensures that the pm_l2[] array is already
	 * initialised to zero.
	 */

	LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);

	return (pm);
}
/*
 * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
 *      u_int flags)
 *
 *      Insert the given physical page (p) at
 *      the specified virtual address (v) in the
 *      target physical map with the protection requested.
 *
 *      NB:  This is the only routine which MAY NOT lazy-evaluate
 *      or lose information.  That is, this routine must actually
 *      insert this page into the given map NOW.
 */
int
pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	struct l2_bucket *l2b;
	struct vm_page *pg, *opg;
	struct pv_entry *pv;
	pt_entry_t *ptep, npte, opte;
	u_int nflags;
	u_int oflags;

	NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));

	KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
	KDASSERT(((va | pa) & PGOFSET) == 0);

	/*
	 * Get a pointer to the page.  Later on in this function, we
	 * test for a managed page by checking pg != NULL.
	 */
	pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;

	nflags = 0;
	if (prot & VM_PROT_WRITE)
		nflags |= PVF_WRITE;
	if (prot & VM_PROT_EXECUTE)
		nflags |= PVF_EXEC;
	if (flags & PMAP_WIRED)
		nflags |= PVF_WIRED;

	PMAP_MAP_TO_HEAD_LOCK();
	pmap_acquire_pmap_lock(pm);

	/*
	 * Fetch the L2 bucket which maps this page, allocating one if
	 * necessary for user pmaps.
	 */
	if (pm == pmap_kernel())
		l2b = pmap_get_l2_bucket(pm, va);
	else
		l2b = pmap_alloc_l2_bucket(pm, va);
	if (l2b == NULL) {
		if (flags & PMAP_CANFAIL) {
			pmap_release_pmap_lock(pm);
			PMAP_MAP_TO_HEAD_UNLOCK();
			return (ENOMEM);
		}
		panic("pmap_enter: failed to allocate L2 bucket");
	}
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	opte = *ptep;
	npte = pa;
	oflags = 0;

	if (opte) {
		/*
		 * There is already a mapping at this address.
		 * If the physical address is different, lookup the
		 * vm_page.
		 */
		if (l2pte_pa(opte) != pa)
			opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
		else
			opg = NULL;
	} else
		opg = NULL;

	if (pg) {
		/*
		 * This is to be a managed mapping.
		 */
		if ((flags & VM_PROT_ALL) ||
		    (pg->mdpage.pvh_attrs & PVF_REF)) {
			/*
			 * - The access type indicates that we don't need
			 *   to do referenced emulation.
			 * OR
			 * - The physical page has already been referenced
			 *   so no need to re-do referenced emulation here.
			 */
			npte |= L2_S_PROTO;

			nflags |= PVF_REF;

			if ((prot & VM_PROT_WRITE) != 0 &&
			    ((flags & VM_PROT_WRITE) != 0 ||
			     (pg->mdpage.pvh_attrs & PVF_MOD) != 0)) {
				/*
				 * This is a writable mapping, and the
				 * page's mod state indicates it has
				 * already been modified. Make it
				 * writable from the outset.
				 */
				npte |= L2_S_PROT_W;
				nflags |= PVF_MOD;
			}
		} else {
			/*
			 * Need to do page referenced emulation.
			 */
			npte |= L2_TYPE_INV;
		}

		npte |= pte_l2_s_cache_mode;

		if (pg == opg) {
			/*
			 * We're changing the attrs of an existing mapping.
			 */
			simple_lock(&pg->mdpage.pvh_slock);
			oflags = pmap_modify_pv(pg, pm, va,
			    PVF_WRITE | PVF_EXEC | PVF_WIRED |
			    PVF_MOD | PVF_REF, nflags);
			simple_unlock(&pg->mdpage.pvh_slock);

#ifdef PMAP_CACHE_VIVT
			/*
			 * We may need to flush the cache if we're
			 * doing rw->ro...
			 */
			if (pm->pm_cstate.cs_cache_d &&
			    (oflags & PVF_NC) == 0 &&
			    (opte & L2_S_PROT_W) != 0 &&
			    (prot & VM_PROT_WRITE) == 0)
				cpu_dcache_wb_range(va, PAGE_SIZE);
#endif
		} else {
			/*
			 * New mapping, or changing the backing page
			 * of an existing mapping.
			 */
			if (opg) {
				/*
				 * Replacing an existing mapping with a new one.
				 * It is part of our managed memory so we
				 * must remove it from the PV list
				 */
				simple_lock(&opg->mdpage.pvh_slock);
				pv = pmap_remove_pv(opg, pm, va);
				pmap_vac_me_harder(opg, pm, 0);
				simple_unlock(&opg->mdpage.pvh_slock);
				oflags = pv->pv_flags;

#ifdef PMAP_CACHE_VIVT
				/*
				 * If the old mapping was valid (ref/mod
				 * emulation creates 'invalid' mappings
				 * initially) then make sure to frob
				 * the cache.
				 */
				if ((oflags & PVF_NC) == 0 &&
				    l2pte_valid(opte)) {
					if (PV_BEEN_EXECD(oflags)) {
						pmap_idcache_wbinv_range(pm, va,
						    PAGE_SIZE);
					} else
					if (PV_BEEN_REFD(oflags)) {
						pmap_dcache_wb_range(pm, va,
						    PAGE_SIZE, true,
						    (oflags & PVF_WRITE) == 0);
					}
				}
#endif
			} else
			if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){
				if ((flags & PMAP_CANFAIL) == 0)
					panic("pmap_enter: no pv entries");

				if (pm != pmap_kernel())
					pmap_free_l2_bucket(pm, l2b, 0);
				pmap_release_pmap_lock(pm);
				PMAP_MAP_TO_HEAD_UNLOCK();
				NPDEBUG(PDB_ENTER,
				    printf("pmap_enter: ENOMEM\n"));
				return (ENOMEM);
			}

			pmap_enter_pv(pg, pv, pm, va, nflags);
		}
	} else {
		/*
		 * We're mapping an unmanaged page.
		 * These are always readable, and possibly writable, from
		 * the get go as we don't need to track ref/mod status.
		 */
		npte |= L2_S_PROTO;
		if (prot & VM_PROT_WRITE)
			npte |= L2_S_PROT_W;

		/*
		 * Make sure the vector table is mapped cacheable
		 */
		if (pm != pmap_kernel() && va == vector_page)
			npte |= pte_l2_s_cache_mode;

		if (opg) {
			/*
			 * Looks like there's an existing 'managed' mapping
			 * at this address.
			 */
			simple_lock(&opg->mdpage.pvh_slock);
			pv = pmap_remove_pv(opg, pm, va);
			pmap_vac_me_harder(opg, pm, 0);
			simple_unlock(&opg->mdpage.pvh_slock);
			oflags = pv->pv_flags;

#ifdef PMAP_CACHE_VIVT
			if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
				if (PV_BEEN_EXECD(oflags))
					pmap_idcache_wbinv_range(pm, va,
					    PAGE_SIZE);
				else
				if (PV_BEEN_REFD(oflags))
					pmap_dcache_wb_range(pm, va, PAGE_SIZE,
					    true, (oflags & PVF_WRITE) == 0);
			}
#endif
			pool_put(&pmap_pv_pool, pv);
		}
	}

	/*
	 * Make sure userland mappings get the right permissions
	 */
	if (pm != pmap_kernel() && va != vector_page)
		npte |= L2_S_PROT_U;

	/*
	 * Keep the stats up to date
	 */
	if (opte == 0) {
		l2b->l2b_occupancy++;
		pm->pm_stats.resident_count++;
	}

	NPDEBUG(PDB_ENTER,
	    printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte));

	/*
	 * If this is just a wiring change, the two PTEs will be
	 * identical, so there's no need to update the page table.
	 */
	if (npte != opte) {
		bool is_cached = pmap_is_cached(pm);

		*ptep = npte;
		if (is_cached) {
			/*
			 * We only need to frob the cache/tlb if this pmap
			 * is current
			 */
			PTE_SYNC(ptep);
			if (va != vector_page && l2pte_valid(npte)) {
				/*
				 * This mapping is likely to be accessed as
				 * soon as we return to userland. Fix up the
				 * L1 entry to avoid taking another
				 * page/domain fault.
				 */
				pd_entry_t *pl1pd, l1pd;

				pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)];
				l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) |
				    L1_C_PROTO;
				if (*pl1pd != l1pd) {
					*pl1pd = l1pd;
					PTE_SYNC(pl1pd);
				}
			}
		}

		if (PV_BEEN_EXECD(oflags))
			pmap_tlb_flushID_SE(pm, va);
		else
		if (PV_BEEN_REFD(oflags))
			pmap_tlb_flushD_SE(pm, va);

		NPDEBUG(PDB_ENTER,
		    printf("pmap_enter: is_cached %d cs 0x%08x\n",
		    is_cached, pm->pm_cstate.cs_all));

		if (pg != NULL) {
			simple_lock(&pg->mdpage.pvh_slock);
			pmap_vac_me_harder(pg, pm, va);
			simple_unlock(&pg->mdpage.pvh_slock);
		}
	}
#if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
	if (pg) {
		simple_lock(&pg->mdpage.pvh_slock);
		KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
		KASSERT(((pg->mdpage.pvh_attrs & PVF_WRITE) == 0) == (pg->mdpage.urw_mappings + pg->mdpage.krw_mappings == 0));
		simple_unlock(&pg->mdpage.pvh_slock);
	}
#endif

	pmap_release_pmap_lock(pm);
	PMAP_MAP_TO_HEAD_UNLOCK();

	return (0);
}
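
/*
 * Illustrative call (the standard pmap(9) idiom, not taken from this
 * file): the low bits of `flags' carry the access type that caused the
 * enter, so
 *
 *	error = pmap_enter(pm, va, pa, VM_PROT_READ|VM_PROT_WRITE,
 *	    VM_PROT_WRITE | PMAP_CANFAIL);
 *
 * installs an immediately-writable PTE, whereas passing no access-type
 * bits leaves an L2_TYPE_INV PTE in place and defers to
 * pmap_fault_fixup() for referenced emulation.
 */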
/*
 * pmap_remove()
 *
 * pmap_remove is responsible for nuking a number of mappings for a range
 * of virtual address space in the current pmap. To do this efficiently
 * is interesting, because in a number of cases a wide virtual address
 * range may be supplied that contains few actual mappings. So, the
 * optimisations are:
 *  1. Skip over hunks of address space for which no L1 or L2 entry exists.
 *  2. Build up a list of pages we've hit, up to a maximum, so we can
 *     maybe do just a partial cache clean. This path of execution is
 *     complicated by the fact that the cache must be flushed _before_
 *     the PTE is nuked, being a VAC :-)
 *  3. If we're called after UVM calls pmap_remove_all(), we can defer
 *     all invalidations until pmap_update(), since pmap_remove_all() has
 *     already flushed the cache.
 *  4. Maybe later fast-case a single page, but I don't think this is
 *     going to make _that_ much difference overall.
 */

#define	PMAP_REMOVE_CLEAN_LIST_SIZE	3

void
pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
	struct l2_bucket *l2b;
	vaddr_t next_bucket;
	pt_entry_t *ptep;
	u_int cleanlist_idx, total, cnt;
	struct {
		vaddr_t va;
		pt_entry_t *ptep;
	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
	u_int mappings, is_exec, is_refd;

	NPDEBUG(PDB_REMOVE, printf("pmap_do_remove: pmap=%p sva=%08lx "
	    "eva=%08lx\n", pm, sva, eva));

	/*
	 * we lock in the pmap => pv_head direction
	 */
	PMAP_MAP_TO_HEAD_LOCK();
	pmap_acquire_pmap_lock(pm);

	if (pm->pm_remove_all || !pmap_is_cached(pm)) {
		cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
		if (pm->pm_cstate.cs_tlb == 0)
			pm->pm_remove_all = true;
	} else
		cleanlist_idx = 0;

	total = 0;

	while (sva < eva) {
		/*
		 * Do one L2 bucket's worth at a time.
		 */
		next_bucket = L2_NEXT_BUCKET(sva);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pm, sva);
		if (l2b == NULL) {
			sva = next_bucket;
			continue;
		}

		ptep = &l2b->l2b_kva[l2pte_index(sva)];

		for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){
			struct vm_page *pg;
			pt_entry_t pte;
			paddr_t pa;

			pte = *ptep;

			if (pte == 0) {
				/* Nothing here, move along */
				continue;
			}

			pa = l2pte_pa(pte);
			is_exec = 0;
			is_refd = 1;

			/*
			 * Update flags. In a number of circumstances,
			 * we could cluster a lot of these and do a
			 * number of sequential pages in one go.
			 */
			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
				struct pv_entry *pv;

				simple_lock(&pg->mdpage.pvh_slock);
				pv = pmap_remove_pv(pg, pm, sva);
				pmap_vac_me_harder(pg, pm, 0);
				simple_unlock(&pg->mdpage.pvh_slock);
				if (pv != NULL) {
					if (pm->pm_remove_all == false) {
						is_exec =
						    PV_BEEN_EXECD(pv->pv_flags);
						is_refd =
						    PV_BEEN_REFD(pv->pv_flags);
					}
					pool_put(&pmap_pv_pool, pv);
				}
			}
			mappings++;

			if (!l2pte_valid(pte)) {
				/*
				 * Ref/Mod emulation is still active for this
				 * mapping, therefore it has not yet been
				 * accessed. No need to frob the cache/tlb.
				 */
				*ptep = 0;
				PTE_SYNC_CURRENT(pm, ptep);
				continue;
			}

			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
				/* Add to the clean list. */
				cleanlist[cleanlist_idx].ptep = ptep;
				cleanlist[cleanlist_idx].va =
				    sva | (is_exec & 1);
				cleanlist_idx++;
			} else
			if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
				/* Nuke everything if needed. */
#ifdef PMAP_CACHE_VIVT
				pmap_idcache_wbinv_all(pm);
#endif
				pmap_tlb_flushID(pm);

				/*
				 * Roll back the previous PTE list,
				 * and zero out the current PTE.
				 */
				for (cnt = 0;
				     cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
					*cleanlist[cnt].ptep = 0;
					PTE_SYNC(cleanlist[cnt].ptep);
				}
				*ptep = 0;
				PTE_SYNC(ptep);
				cleanlist_idx++;
				pm->pm_remove_all = true;
			} else {
				*ptep = 0;
				PTE_SYNC(ptep);
				if (pm->pm_remove_all == false) {
					if (is_exec)
						pmap_tlb_flushID_SE(pm, sva);
					else
					if (is_refd)
						pmap_tlb_flushD_SE(pm, sva);
				}
			}
		}

		/*
		 * Deal with any left overs
		 */
		if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
			total += cleanlist_idx;
			for (cnt = 0; cnt < cleanlist_idx; cnt++) {
				if (pm->pm_cstate.cs_all != 0) {
					vaddr_t clva = cleanlist[cnt].va & ~1;
					if (cleanlist[cnt].va & 1) {
#ifdef PMAP_CACHE_VIVT
						pmap_idcache_wbinv_range(pm,
						    clva, PAGE_SIZE);
#endif
						pmap_tlb_flushID_SE(pm, clva);
					} else {
#ifdef PMAP_CACHE_VIVT
						pmap_dcache_wb_range(pm,
						    clva, PAGE_SIZE, true,
						    false);
#endif
						pmap_tlb_flushD_SE(pm, clva);
					}
				}
				*cleanlist[cnt].ptep = 0;
				PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep);
			}

			/*
			 * If it looks like we're removing a whole bunch
			 * of mappings, it's faster to just write-back
			 * the whole cache now and defer TLB flushes until
			 * pmap_update() is called.
			 */
			if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
				cleanlist_idx = 0;
			else {
				cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
#ifdef PMAP_CACHE_VIVT
				pmap_idcache_wbinv_all(pm);
#endif
				pm->pm_remove_all = true;
			}
		}

		pmap_free_l2_bucket(pm, l2b, mappings);
		pm->pm_stats.resident_count -= mappings;
	}

	pmap_release_pmap_lock(pm);
	PMAP_MAP_TO_HEAD_UNLOCK();
}
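
/*
 * Worked example of the clean-list strategy above: while at most
 * PMAP_REMOVE_CLEAN_LIST_SIZE (3) valid mappings are seen, each PTE is
 * recorded and later cleaned/flushed individually. On the 4th mapping
 * the whole cache/TLB is written back once, the recorded PTEs are
 * zapped wholesale, and any remaining invalidation is deferred to
 * pmap_update().
 */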
#ifdef PMAP_CACHE_VIPT
static struct pv_entry *
pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
{
	struct pv_entry *pv;

	simple_lock(&pg->mdpage.pvh_slock);
	KASSERT(arm_cache_prefer_mask == 0 || pg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC));
	KASSERT((pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0);

	pv = pmap_remove_pv(pg, pmap_kernel(), va);
	KASSERT(pv != NULL);
	KASSERT(pv->pv_flags & PVF_KENTRY);

	/*
	 * If we are removing a writeable mapping to a cached exec page,
	 * and it's the last mapping, clear its execness; otherwise sync
	 * the page to the icache.
	 */
	if ((pg->mdpage.pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
	    && (pv->pv_flags & PVF_WRITE) != 0) {
		if (SLIST_EMPTY(&pg->mdpage.pvh_list)) {
			pg->mdpage.pvh_attrs &= ~PVF_EXEC;
			PMAPCOUNT(exec_discarded_kremove);
		} else {
			pmap_syncicache_page(pg);
			PMAPCOUNT(exec_synced_kremove);
		}
	}
	pmap_vac_me_harder(pg, pmap_kernel(), 0);
	simple_unlock(&pg->mdpage.pvh_slock);

	return (pv);
}
#endif /* PMAP_CACHE_VIPT */
/*
 * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
 *
 * We assume there is already sufficient KVM space available
 * to do this, as we can't allocate L2 descriptor tables/metadata
 * from here.
 */
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, opte;
#ifdef PMAP_CACHE_VIVT
	struct vm_page *pg = (prot & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
#endif
#ifdef PMAP_CACHE_VIPT
	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
	struct vm_page *opg;
	struct pv_entry *pv = NULL;
#endif

	NPDEBUG(PDB_KENTER,
	    printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n",
	    va, pa, prot));

	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
	KDASSERT(l2b != NULL);

	ptep = &l2b->l2b_kva[l2pte_index(va)];
	opte = *ptep;

	if (opte == 0) {
		PMAPCOUNT(kenter_mappings);
		l2b->l2b_occupancy++;
	} else {
		PMAPCOUNT(kenter_remappings);
#ifdef PMAP_CACHE_VIPT
		opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
		if (opg) {
			KASSERT((opg->mdpage.pvh_attrs & PVF_KMPAGE) == 0);
			KASSERT((prot & PMAP_KMPAGE) == 0);
			simple_lock(&opg->mdpage.pvh_slock);
			pv = pmap_kremove_pg(opg, va);
			simple_unlock(&opg->mdpage.pvh_slock);
		}
#endif
		if (l2pte_valid(opte)) {
#ifdef PMAP_CACHE_VIVT
			cpu_dcache_wbinv_range(va, PAGE_SIZE);
#endif
			cpu_tlb_flushD_SE(va);
			cpu_cpwait();
		}
	}

	*ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) |
	    pte_l2_s_cache_mode;
	PTE_SYNC(ptep);

	if (pg) {
		if (prot & PMAP_KMPAGE) {
			simple_lock(&pg->mdpage.pvh_slock);
			KASSERT(pg->mdpage.urw_mappings == 0);
			KASSERT(pg->mdpage.uro_mappings == 0);
			KASSERT(pg->mdpage.krw_mappings == 0);
			KASSERT(pg->mdpage.kro_mappings == 0);
#ifdef PMAP_CACHE_VIPT
			KASSERT(pv == NULL);
			KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0);
			KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0);
			/* if there is a color conflict, evict from cache. */
			if (pmap_is_page_colored_p(pg)
			    && ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask)) {
				PMAPCOUNT(vac_color_change);
				pmap_flush_page(pg, PMAP_FLUSH_PRIMARY);
			} else if (pg->mdpage.pvh_attrs & PVF_MULTCLR) {
				/*
				 * If this page has multiple colors, expunge
				 * them all.
				 */
				PMAPCOUNT(vac_flush_lots2);
				pmap_flush_page(pg, PMAP_FLUSH_SECONDARY);
			}
			pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
			pg->mdpage.pvh_attrs |= PVF_KMPAGE
			    | PVF_COLORED | PVF_DIRTY
			    | (va & arm_cache_prefer_mask);
#endif
#ifdef PMAP_CACHE_VIVT
			pg->mdpage.pvh_attrs |= PVF_KMPAGE;
#endif
			simple_unlock(&pg->mdpage.pvh_slock);
#ifdef PMAP_CACHE_VIPT
		} else {
			if (pv == NULL) {
				pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
				KASSERT(pv != NULL);
			}
			pmap_enter_pv(pg, pv, pmap_kernel(), va,
			    PVF_WIRED | PVF_KENTRY
			    | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
			if ((prot & VM_PROT_WRITE)
			    && !(pg->mdpage.pvh_attrs & PVF_NC))
				pg->mdpage.pvh_attrs |= PVF_DIRTY;
			KASSERT((prot & VM_PROT_WRITE) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
			simple_lock(&pg->mdpage.pvh_slock);
			pmap_vac_me_harder(pg, pmap_kernel(), va);
			simple_unlock(&pg->mdpage.pvh_slock);
#endif
		}
#ifdef PMAP_CACHE_VIPT
	} else {
		if (pv != NULL)
			pool_put(&pmap_pv_pool, pv);
#endif
	}
}
void
pmap_kremove(vaddr_t va, vsize_t len)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, *sptep, opte;
	vaddr_t next_bucket, eva;
	u_int mappings;
	struct vm_page *opg;

	PMAPCOUNT(kenter_unmappings);

	NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n",
	    va, len));

	eva = va + len;

	while (va < eva) {
		next_bucket = L2_NEXT_BUCKET(va);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		KDASSERT(l2b != NULL);

		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
		mappings = 0;

		while (va < next_bucket) {
			opte = *ptep;
			opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
			if (opg) {
				if (opg->mdpage.pvh_attrs & PVF_KMPAGE) {
					simple_lock(&opg->mdpage.pvh_slock);
					KASSERT(opg->mdpage.urw_mappings == 0);
					KASSERT(opg->mdpage.uro_mappings == 0);
					KASSERT(opg->mdpage.krw_mappings == 0);
					KASSERT(opg->mdpage.kro_mappings == 0);
					opg->mdpage.pvh_attrs &= ~PVF_KMPAGE;
#ifdef PMAP_CACHE_VIPT
					opg->mdpage.pvh_attrs &= ~PVF_WRITE;
#endif
					simple_unlock(&opg->mdpage.pvh_slock);
#ifdef PMAP_CACHE_VIPT
				} else {
					pool_put(&pmap_pv_pool,
					    pmap_kremove_pg(opg, va));
#endif
				}
			}
			if (l2pte_valid(opte)) {
#ifdef PMAP_CACHE_VIVT
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
#endif
				cpu_tlb_flushD_SE(va);
			}
			if (opte) {
				*ptep = 0;
				mappings++;
			}
			va += PAGE_SIZE;
			ptep++;
		}
		KDASSERT(mappings <= l2b->l2b_occupancy);
		l2b->l2b_occupancy -= mappings;
		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
	}
}
bool
pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep, pte;
	paddr_t pa;
	u_int l1idx;

	pmap_acquire_pmap_lock(pm);

	l1idx = L1_IDX(va);
	pl1pd = &pm->pm_l1->l1_kva[l1idx];
	l1pd = *pl1pd;

	if (l1pte_section_p(l1pd)) {
		/*
		 * These should only happen for pmap_kernel()
		 */
		KDASSERT(pm == pmap_kernel());
		pmap_release_pmap_lock(pm);
		pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
	} else {
		/*
		 * Note that we can't rely on the validity of the L1
		 * descriptor as an indication that a mapping exists.
		 * We have to look it up in the L2 dtable.
		 */
		l2 = pm->pm_l2[L2_IDX(l1idx)];

		if (l2 == NULL ||
		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
			pmap_release_pmap_lock(pm);
			return false;
		}

		ptep = &ptep[l2pte_index(va)];
		pte = *ptep;
		pmap_release_pmap_lock(pm);

		if (pte == 0)
			return false;

		switch (pte & L2_TYPE_MASK) {
		case L2_TYPE_L:
			pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
			break;

		default:
			pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
			break;
		}
	}

	if (pap != NULL)
		*pap = pa;

	return true;
}
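
/*
 * Illustrative use (the standard pmap(9) idiom, not from this file):
 *
 *	paddr_t pa;
 *
 *	if (!pmap_extract(pmap_kernel(), va, &pa))
 *		panic("va 0x%lx has no mapping", va);
 */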
void
pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vaddr_t next_bucket;
	u_int flags;
	u_int clr_mask;
	int flush;

	NPDEBUG(PDB_PROTECT,
	    printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n",
	    pm, sva, eva, prot));

	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pm, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE) {
		/*
		 * If this is a read->write transition, just ignore it and let
		 * uvm_fault() take care of it later.
		 */
		return;
	}

	PMAP_MAP_TO_HEAD_LOCK();
	pmap_acquire_pmap_lock(pm);

	flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
	flags = 0;
	clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC);

	while (sva < eva) {
		next_bucket = L2_NEXT_BUCKET(sva);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pm, sva);
		if (l2b == NULL) {
			sva = next_bucket;
			continue;
		}

		ptep = &l2b->l2b_kva[l2pte_index(sva)];

		while (sva < next_bucket) {
			pte = *ptep;
			if (l2pte_valid(pte) != 0 && (pte & L2_S_PROT_W) != 0) {
				struct vm_page *pg;
				u_int f;

#ifdef PMAP_CACHE_VIVT
				/*
				 * OK, at this point, we know we're doing
				 * write-protect operation.  If the pmap is
				 * active, write-back the page.
				 */
				pmap_dcache_wb_range(pm, sva, PAGE_SIZE,
				    false, false);
#endif

				pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
				pte &= ~L2_S_PROT_W;
				*ptep = pte;
				PTE_SYNC(ptep);

				if (pg != NULL) {
					simple_lock(&pg->mdpage.pvh_slock);
					f = pmap_modify_pv(pg, pm, sva,
					    clr_mask, 0);
					pmap_vac_me_harder(pg, pm, sva);
					simple_unlock(&pg->mdpage.pvh_slock);
				} else
					f = PVF_REF | PVF_EXEC;

				if (flush >= 0) {
					flush++;
					flags |= f;
				} else
				if (PV_BEEN_EXECD(f))
					pmap_tlb_flushID_SE(pm, sva);
				else
				if (PV_BEEN_REFD(f))
					pmap_tlb_flushD_SE(pm, sva);
			}

			sva += PAGE_SIZE;
			ptep++;
		}
	}

	pmap_release_pmap_lock(pm);
	PMAP_MAP_TO_HEAD_UNLOCK();

	if (flush) {
		if (PV_BEEN_EXECD(flags))
			pmap_tlb_flushID(pm);
		else
		if (PV_BEEN_REFD(flags))
			pmap_tlb_flushD(pm);
	}
}
void
pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;
	vaddr_t next_bucket;
	vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva;

	NPDEBUG(PDB_EXEC,
	    printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n",
	    pm, sva, eva));

	PMAP_MAP_TO_HEAD_LOCK();
	pmap_acquire_pmap_lock(pm);

	while (sva < eva) {
		next_bucket = L2_NEXT_BUCKET(sva);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pm, sva);
		if (l2b == NULL) {
			sva = next_bucket;
			continue;
		}

		for (ptep = &l2b->l2b_kva[l2pte_index(sva)];
		     sva < next_bucket;
		     sva += page_size, ptep++, page_size = PAGE_SIZE) {
			if (l2pte_valid(*ptep)) {
				cpu_icache_sync_range(sva,
				    min(page_size, eva - sva));
			}
		}
	}

	pmap_release_pmap_lock(pm);
	PMAP_MAP_TO_HEAD_UNLOCK();
}
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{

	NPDEBUG(PDB_PROTECT,
	    printf("pmap_page_protect: pg %p (0x%08lx), prot 0x%x\n",
	    pg, VM_PAGE_TO_PHYS(pg), prot));

	switch(prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
#if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
		pmap_clearbit(pg, PVF_EXEC);
		break;
#endif
	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
		break;

	case VM_PROT_READ:
#if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
		pmap_clearbit(pg, PVF_WRITE|PVF_EXEC);
		break;
#endif
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pmap_clearbit(pg, PVF_WRITE);
		break;

	default:
		pmap_page_remove(pg);
		break;
	}
}
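
/*
 * In short: removing only VM_PROT_WRITE is handled lazily by clearing
 * PVF_WRITE (subsequent writes fault into pmap_fault_fixup() as
 * modified emulation), while removing read permission as well drops
 * every mapping of the page via pmap_page_remove().
 */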
/*
 * pmap_clear_modify:
 *
 *	Clear the "modified" attribute for a page.
 */
bool
pmap_clear_modify(struct vm_page *pg)
{
	bool rv;

	if (pg->mdpage.pvh_attrs & PVF_MOD) {
		rv = true;
#ifdef PMAP_CACHE_VIPT
		/*
		 * If we are going to clear the modified bit and there are
		 * no other modified bits set, flush the page to memory and
		 * mark it clean.
		 */
		if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD)
			pmap_flush_page(pg, PMAP_CLEAN_PRIMARY);
#endif
		pmap_clearbit(pg, PVF_MOD);
	} else
		rv = false;

	return (rv);
}
/*
 * pmap_clear_reference:
 *
 *	Clear the "referenced" attribute for a page.
 */
bool
pmap_clear_reference(struct vm_page *pg)
{
	bool rv;

	if (pg->mdpage.pvh_attrs & PVF_REF) {
		rv = true;
		pmap_clearbit(pg, PVF_REF);
	} else
		rv = false;

	return (rv);
}

/*
 * pmap_is_modified:
 *
 *	Test if a page has the "modified" attribute.
 */
/* See <arm/arm32/pmap.h> */

/*
 * pmap_is_referenced:
 *
 *	Test if a page has the "referenced" attribute.
 */
/* See <arm/arm32/pmap.h> */
int
pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep, pte;
	paddr_t pa;
	u_int l1idx;
	int rv = 0;

	PMAP_MAP_TO_HEAD_LOCK();
	pmap_acquire_pmap_lock(pm);

	l1idx = L1_IDX(va);

	/*
	 * If there is no l2_dtable for this address, then the process
	 * has no business accessing it.
	 *
	 * Note: This will catch userland processes trying to access
	 * kernel addresses.
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (l2 == NULL)
		goto out;

	/*
	 * Likewise if there is no L2 descriptor table
	 */
	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
	if (l2b->l2b_kva == NULL)
		goto out;

	/*
	 * Check the PTE itself.
	 */
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;
	if (pte == 0)
		goto out;

	/*
	 * Catch a userland access to the vector page mapped at 0x0
	 */
	if (user && (pte & L2_S_PROT_U) == 0)
		goto out;

	pa = l2pte_pa(pte);

	if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
		/*
		 * This looks like a good candidate for "page modified"
		 * emulation...
		 */
		struct pv_entry *pv;
		struct vm_page *pg;

		/* Extract the physical address of the page */
		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
			goto out;

		/* Get the current flags for this page. */
		simple_lock(&pg->mdpage.pvh_slock);

		pv = pmap_find_pv(pg, pm, va);
		if (pv == NULL) {
			simple_unlock(&pg->mdpage.pvh_slock);
			goto out;
		}

		/*
		 * Do the flags say this page is writable? If not then it
		 * is a genuine write fault. If yes then the write fault is
		 * our fault as we did not reflect the write access in the
		 * PTE. Now we know a write has occurred we can correct this
		 * and also set the modified bit
		 */
		if ((pv->pv_flags & PVF_WRITE) == 0) {
			simple_unlock(&pg->mdpage.pvh_slock);
			goto out;
		}

		NPDEBUG(PDB_FOLLOW,
		    printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
		    pm, va, VM_PAGE_TO_PHYS(pg)));

		pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
		pv->pv_flags |= PVF_REF | PVF_MOD;
#ifdef PMAP_CACHE_VIPT
		/*
		 * If there are cacheable mappings for this page, mark it dirty.
		 */
		if ((pg->mdpage.pvh_attrs & PVF_NC) == 0)
			pg->mdpage.pvh_attrs |= PVF_DIRTY;
#endif
		simple_unlock(&pg->mdpage.pvh_slock);

		/*
		 * Re-enable write permissions for the page.  No need to call
		 * pmap_vac_me_harder(), since this is just a
		 * modified-emulation fault, and the PVF_WRITE bit isn't
		 * changing. We've already set the cacheable bits based on
		 * the assumption that we can write to this page.
		 */
		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
		PTE_SYNC(ptep);
		rv = 1;
	} else
	if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
		/*
		 * This looks like a good candidate for "page referenced"
		 * emulation.
		 */
		struct pv_entry *pv;
		struct vm_page *pg;

		/* Extract the physical address of the page */
		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
			goto out;

		/* Get the current flags for this page. */
		simple_lock(&pg->mdpage.pvh_slock);

		pv = pmap_find_pv(pg, pm, va);
		if (pv == NULL) {
			simple_unlock(&pg->mdpage.pvh_slock);
			goto out;
		}

		pg->mdpage.pvh_attrs |= PVF_REF;
		pv->pv_flags |= PVF_REF;
		simple_unlock(&pg->mdpage.pvh_slock);

		NPDEBUG(PDB_FOLLOW,
		    printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
		    pm, va, VM_PAGE_TO_PHYS(pg)));

		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
		PTE_SYNC(ptep);
		rv = 1;
	}

	/*
	 * We know there is a valid mapping here, so simply
	 * fix up the L1 if necessary.
	 */
	pl1pd = &pm->pm_l1->l1_kva[l1idx];
	l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
	if (*pl1pd != l1pd) {
		*pl1pd = l1pd;
		PTE_SYNC(pl1pd);
		rv = 1;
	}

#ifdef CPU_SA110
	/*
	 * There are bugs in the rev K SA110.  This is a check for one
	 * of them.
	 */
	if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
	    curcpu()->ci_arm_cpurev < 3) {
		/* Always current pmap */
		if (l2pte_valid(pte)) {
			extern int kernel_debug;
			if (kernel_debug & 1) {
				struct proc *p = curlwp->l_proc;
				printf("prefetch_abort: page is already "
				    "mapped - pte=%p *pte=%08x\n", ptep, pte);
				printf("prefetch_abort: pc=%08lx proc=%p "
				    "process=%s\n", va, p, p->p_comm);
				printf("prefetch_abort: far=%08x fs=%x\n",
				    cpu_faultaddress(), cpu_faultstatus());
			}
			if (kernel_debug & 2)
				Debugger();
			rv = 1;
		}
	}
#endif /* CPU_SA110 */

#ifdef DEBUG
	/*
	 * If 'rv == 0' at this point, it generally indicates that there is a
	 * stale TLB entry for the faulting address. This happens when two or
	 * more processes are sharing an L1. Since we don't flush the TLB on
	 * a context switch between such processes, we can take domain faults
	 * for mappings which exist at the same VA in both processes. EVEN IF
	 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
	 * example.
	 *
	 * This is extremely likely to happen if pmap_enter() updated the L1
	 * entry for a recently entered mapping. In this case, the TLB is
	 * flushed for the new mapping, but there may still be TLB entries for
	 * other mappings belonging to other processes in the 1MB range
	 * covered by the L1 entry.
	 *
	 * Since 'rv == 0', we know that the L1 already contains the correct
	 * value, so the fault must be due to a stale TLB entry.
	 *
	 * Since we always need to flush the TLB anyway in the case where we
	 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
	 * stale TLB entries dynamically.
	 *
	 * However, the above condition can ONLY happen if the current L1 is
	 * being shared. If it happens when the L1 is unshared, it indicates
	 * that other parts of the pmap are not doing their job WRT managing
	 * the TLB.
	 */
	if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
		extern int last_fault_code;
		printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
		    pm, va, ftype);
		printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
		    l2, l2b, ptep, pl1pd);
		printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
		    pte, l1pd, last_fault_code);
	}
#endif

	cpu_tlb_flushID_SE(va);
	cpu_cpwait();

	rv = 1;

out:
	pmap_release_pmap_lock(pm);
	PMAP_MAP_TO_HEAD_UNLOCK();

	return (rv);
}
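
/*
 * Sketch of the ref/mod emulation life-cycle resolved above:
 *
 *	pmap_enter() installs an L2_TYPE_INV PTE -> the first access
 *	faults and pmap_fault_fixup() sets PVF_REF and makes the PTE
 *	valid; a write to a clean, writable page faults -> PVF_MOD is
 *	set and L2_S_PROT_W is enabled in the PTE.
 */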
/*
 * Routine:	pmap_procwr
 *
 * Function:
 *	Synchronize caches corresponding to [addr, addr+len) in p.
 *
 */
void
pmap_procwr(struct proc *p, vaddr_t va, int len)
{
	/* We only need to do anything if it is the current process. */
	if (p == curproc)
		cpu_icache_sync_range(va, len);
}
/*
 * Routine:	pmap_unwire
 * Function:	Clear the wired attribute for a map/virtual-address pair.
 *
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_unwire(pmap_t pm, vaddr_t va)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	struct vm_page *pg;
	paddr_t pa;

	NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));

	PMAP_MAP_TO_HEAD_LOCK();
	pmap_acquire_pmap_lock(pm);

	l2b = pmap_get_l2_bucket(pm, va);
	KDASSERT(l2b != NULL);

	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;

	/* Extract the physical address of the page */
	pa = l2pte_pa(pte);

	if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
		/* Update the wired bit in the pv entry for this page. */
		simple_lock(&pg->mdpage.pvh_slock);
		(void) pmap_modify_pv(pg, pm, va, PVF_WIRED, 0);
		simple_unlock(&pg->mdpage.pvh_slock);
	}

	pmap_release_pmap_lock(pm);
	PMAP_MAP_TO_HEAD_UNLOCK();
}
void
pmap_activate(struct lwp *l)
{
	extern int block_userspace_access;
	pmap_t opm, npm, rpm;
	uint32_t odacr, ndacr;
	u_int oldirqstate;

	/*
	 * If activating a non-current lwp or the current lwp is
	 * already active, just return.
	 */
	if (l != curlwp ||
	    l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true)
		return;

	npm = l->l_proc->p_vmspace->vm_map.pmap;
	ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    (DOMAIN_CLIENT << (npm->pm_domain * 2));

	/*
	 * If TTB and DACR are unchanged, short-circuit all the
	 * TLB/cache management stuff.
	 */
	if (pmap_previous_active_lwp != NULL) {
		opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap;
		odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
		    (DOMAIN_CLIENT << (opm->pm_domain * 2));

		if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr)
			goto all_done;
	} else
		opm = NULL;

	PMAPCOUNT(activations);
	block_userspace_access = 1;

	/*
	 * If switching to a user vmspace which is different to the
	 * most recent one, and the most recent one is potentially
	 * live in the cache, we must write-back and invalidate the
	 * entire cache.
	 */
	rpm = pmap_recent_user;

/*
 * XXXSCW: There's a corner case here which can leave turds in the cache as
 * reported in kern/41058. They're probably left over during tear-down and
 * switching away from an exiting process. Until the root cause is identified
 * and fixed, zap the cache when switching pmaps. This will result in a few
 * unnecessary cache flushes, but that's better than silently corrupting data.
 */
#if 0
	if (npm != pmap_kernel() && rpm && npm != rpm &&
	    rpm->pm_cstate.cs_cache) {
		rpm->pm_cstate.cs_cache = 0;
#ifdef PMAP_CACHE_VIVT
		cpu_idcache_wbinv_all();
#endif
	}
#else
	if (rpm) {
		rpm->pm_cstate.cs_cache = 0;
		if (npm == pmap_kernel())
			pmap_recent_user = NULL;
#ifdef PMAP_CACHE_VIVT
		cpu_idcache_wbinv_all();
#endif
	}
#endif

	/* No interrupts while we frob the TTB/DACR */
	oldirqstate = disable_interrupts(IF32_bits);

	/*
	 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
	 * entry corresponding to 'vector_page' in the incoming L1 table
	 * before switching to it otherwise subsequent interrupts/exceptions
	 * (including domain faults!) will jump into hyperspace.
	 */
	if (npm->pm_pl1vec != NULL) {
		cpu_tlb_flushID_SE((u_int)vector_page);
		cpu_cpwait();
		*npm->pm_pl1vec = npm->pm_l1vec;
		PTE_SYNC(npm->pm_pl1vec);
	}

	cpu_domains(ndacr);

	if (npm == pmap_kernel() || npm == rpm) {
		/*
		 * Switching to a kernel thread, or back to the
		 * same user vmspace as before... Simply update
		 * the TTB (no TLB flush required)
		 */
		__asm volatile("mcr p15, 0, %0, c2, c0, 0" ::
		    "r"(npm->pm_l1->l1_physaddr));
		cpu_cpwait();
	} else {
		/*
		 * Otherwise, update TTB and flush TLB
		 */
		cpu_context_switch(npm->pm_l1->l1_physaddr);
		if (rpm != NULL)
			rpm->pm_cstate.cs_tlb = 0;
	}

	restore_interrupts(oldirqstate);

	block_userspace_access = 0;

 all_done:
	/*
	 * The new pmap is resident. Make sure it's marked
	 * as resident in the cache/TLB.
	 */
	npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
	if (npm != pmap_kernel())
		pmap_recent_user = npm;

	/* The old pmap is no longer active */
	if (opm != NULL)
		opm->pm_activated = false;

	/* But the new one is */
	npm->pm_activated = true;
}

void
pmap_deactivate(struct lwp *l)
{

	/*
	 * If the process is exiting, make sure pmap_activate() does
	 * a full MMU context-switch and cache flush, which we might
	 * otherwise skip. See PR port-arm/38950.
	 */
	if (l->l_proc->p_sflag & PS_WEXIT)
		pmap_previous_active_lwp = NULL;

	l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false;
}
void
pmap_update(pmap_t pm)
{

	if (pm->pm_remove_all) {
		/*
		 * Finish up the pmap_remove_all() optimisation by flushing
		 * the TLB.
		 */
		pmap_tlb_flushID(pm);
		pm->pm_remove_all = false;
	}

	if (pmap_is_current(pm)) {
		/*
		 * If we're dealing with a current userland pmap, move its L1
		 * to the end of the LRU.
		 */
		if (pm != pmap_kernel())
			pmap_use_l1(pm);

		/*
		 * We can assume we're done with frobbing the cache/tlb for
		 * now. Make sure any future pmap ops don't skip cache/tlb
		 * flushes.
		 */
		pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
	}

	/*
	 * make sure TLB/cache operations have completed.
	 */
	cpu_cpwait();
}

void
pmap_remove_all(pmap_t pm)
{

	/*
	 * The vmspace described by this pmap is about to be torn down.
	 * Until pmap_update() is called, UVM will only make calls
	 * to pmap_remove(). We can make life much simpler by flushing
	 * the cache now, and deferring TLB invalidation to pmap_update().
	 */
#ifdef PMAP_CACHE_VIVT
	pmap_idcache_wbinv_all(pm);
#endif
	pm->pm_remove_all = true;
}
/*
 * Retire the given physical map from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pm)
{
	u_int count;

	if (pm == NULL)
		return;

	if (pm->pm_remove_all) {
		pmap_tlb_flushID(pm);
		pm->pm_remove_all = false;
	}

	/*
	 * Drop reference count
	 */
	mutex_enter(&pm->pm_lock);
	count = --pm->pm_obj.uo_refs;
	mutex_exit(&pm->pm_lock);
	if (count > 0) {
		if (pmap_is_current(pm)) {
			if (pm != pmap_kernel())
				pmap_use_l1(pm);
			pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
		}
		return;
	}

	/*
	 * reference count is zero, free pmap resources and then free pmap.
	 */

	if (vector_page < KERNEL_BASE) {
		KDASSERT(!pmap_is_current(pm));

		/* Remove the vector page mapping */
		pmap_remove(pm, vector_page, vector_page + PAGE_SIZE);
		pmap_update(pm);
	}

	LIST_REMOVE(pm, pm_list);

	if (pmap_recent_user == pm)
		pmap_recent_user = NULL;

	UVM_OBJ_DESTROY(&pm->pm_obj);

	/* return the pmap to the pool */
	pool_cache_put(&pmap_cache, pm);
}
/*
 * void pmap_reference(pmap_t pm)
 *
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pm)
{

	if (pm == NULL)
		return;

	mutex_enter(&pm->pm_lock);
	pm->pm_obj.uo_refs++;
	mutex_exit(&pm->pm_lock);
}
#if ARM_MMU_V6 > 0

static struct evcnt pmap_prefer_nochange_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange");
static struct evcnt pmap_prefer_change_ev =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change");

EVCNT_ATTACH_STATIC(pmap_prefer_change_ev);
EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev);

void
pmap_prefer(vaddr_t hint, vaddr_t *vap, int td)
{
	vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1);
	vaddr_t va = *vap;
	vaddr_t diff = (hint - va) & mask;

	if (diff == 0) {
		pmap_prefer_nochange_ev.ev_count++;
	} else {
		pmap_prefer_change_ev.ev_count++;
		if (__predict_false(td))
			va -= mask + 1;
		va += diff;
		*vap = va;
	}
}
#endif /* ARM_MMU_V6 */
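
/*
 * Worked example (with a hypothetical arm_cache_prefer_mask of 0x3000):
 * mask = 0x3fff; for hint = 0x00103000 and *vap = 0x00200000,
 * diff = (hint - va) & mask = 0x3000, so *vap becomes 0x00203000 and
 * the returned address has the same cache color as the hint.
 */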
/*
 * pmap_zero_page()
 *
 * Zero a given physical page by mapping it at a page hook point.
 * In doing the zero page op, the page we zero is mapped cacheable:
 * on StrongARM, accesses to non-cached pages are non-burst, making
 * writing _any_ bulk data very slow.
 */
#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
void
pmap_zero_page_generic(paddr_t phys)
{
#if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
#endif
#ifdef PMAP_CACHE_VIPT
	/* Choose the last page color it had, if any */
	const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
#else
	const vsize_t va_offset = 0;
#endif
	pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];

#ifdef DEBUG
	if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
		panic("pmap_zero_page: page has mappings");
#endif

	KDASSERT((phys & PGOFSET) == 0);

	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page. Invalidate the TLB as needed.
	 */
	*ptep = L2_S_PROTO | phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(cdstp + va_offset);
	cpu_cpwait();
	bzero_page(cdstp + va_offset);
	/*
	 * Unmap the page.
	 */
	*ptep = 0;
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(cdstp + va_offset);
#ifdef PMAP_CACHE_VIVT
	cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
#endif
#ifdef PMAP_CACHE_VIPT
	/*
	 * This page is now cache resident so it now has a page color.
	 * Any contents have been obliterated so clear the EXEC flag.
	 */
	if (!pmap_is_page_colored_p(pg)) {
		PMAPCOUNT(vac_color_new);
		pg->mdpage.pvh_attrs |= PVF_COLORED;
	}
	if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) {
		pg->mdpage.pvh_attrs &= ~PVF_EXEC;
		PMAPCOUNT(exec_discarded_zero);
	}
	pg->mdpage.pvh_attrs |= PVF_DIRTY;
#endif
}
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */
#if ARM_MMU_XSCALE == 1
void
pmap_zero_page_xscale(paddr_t phys)
{
#ifdef DEBUG
	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);

	if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
		panic("pmap_zero_page: page has mappings");
#endif

	KDASSERT((phys & PGOFSET) == 0);

	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page. Invalidate the TLB as needed.
	 */
	*cdst_pte = L2_S_PROTO | phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XS_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bzero_page(cdstp);
	xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */
/* pmap_pageidlezero()
 *
 * The same as above, except that we assume that the page is not
 * mapped.  This means we never have to flush the cache first.  Called
 * from the idle loop.
 */
bool
pmap_pageidlezero(paddr_t phys)
{
	unsigned int i;
	int *ptr;
	bool rv = true;
#if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
	struct vm_page * const pg = PHYS_TO_VM_PAGE(phys);
#endif
#ifdef PMAP_CACHE_VIPT
	/* Choose the last page color it had, if any */
	const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
#else
	const vsize_t va_offset = 0;
#endif
	pt_entry_t * const ptep = &csrc_pte[va_offset >> PGSHIFT];

#ifdef DEBUG
	if (!SLIST_EMPTY(&pg->mdpage.pvh_list))
		panic("pmap_pageidlezero: page has mappings");
#endif

	KDASSERT((phys & PGOFSET) == 0);

	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page. Invalidate the TLB as needed.
	 */
	*ptep = L2_S_PROTO | phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(cdstp + va_offset);
	cpu_cpwait();

	for (i = 0, ptr = (int *)(cdstp + va_offset);
	     i < (PAGE_SIZE / sizeof(int)); i++) {
		if (sched_curcpu_runnable_p() != 0) {
			/*
			 * A process has become ready.  Abort now,
			 * so we don't keep it waiting while we
			 * do slow memory access to finish this
			 * page.
			 */
			rv = false;
			break;
		}
		*ptr++ = 0;
	}

#ifdef PMAP_CACHE_VIVT
	if (rv)
		/*
		 * if we aborted we'll rezero this page again later so don't
		 * purge it unless we finished it
		 */
		cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
#elif defined(PMAP_CACHE_VIPT)
	/*
	 * This page is now cache resident so it now has a page color.
	 * Any contents have been obliterated so clear the EXEC flag.
	 */
	if (!pmap_is_page_colored_p(pg)) {
		PMAPCOUNT(vac_color_new);
		pg->mdpage.pvh_attrs |= PVF_COLORED;
	}
	if (PV_IS_EXEC_P(pg->mdpage.pvh_attrs)) {
		pg->mdpage.pvh_attrs &= ~PVF_EXEC;
		PMAPCOUNT(exec_discarded_zero);
	}
#endif
	/*
	 * Unmap the page.
	 */
	*ptep = 0;
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(cdstp + va_offset);

	return (rv);
}
/*
 * pmap_copy_page()
 *
 * Copy one physical page into another, by mapping the pages into
 * hook points. The same comment regarding cachability as in
 * pmap_zero_page also applies here.
 */
#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
void
pmap_copy_page_generic(paddr_t src, paddr_t dst)
{
	struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src);
#if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
	struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst);
#endif
#ifdef PMAP_CACHE_VIPT
	const vsize_t src_va_offset = src_pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
	const vsize_t dst_va_offset = dst_pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
#else
	const vsize_t src_va_offset = 0;
	const vsize_t dst_va_offset = 0;
#endif
	pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT];
	pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT];

#ifdef DEBUG
	if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list))
		panic("pmap_copy_page: dst page has mappings");
#endif

#ifdef PMAP_CACHE_VIPT
	KASSERT(arm_cache_prefer_mask == 0 ||
	    src_pg->mdpage.pvh_attrs & (PVF_COLORED|PVF_NC));
#endif
	KDASSERT((src & PGOFSET) == 0);
	KDASSERT((dst & PGOFSET) == 0);

	/*
	 * Clean the source page.  Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
	simple_lock(&src_pg->mdpage.pvh_slock);
#ifdef PMAP_CACHE_VIVT
	(void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true);
#endif

	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page. Invalidate the TLB
	 * as required.
	 */
	*src_ptep = L2_S_PROTO
	    | src
#ifdef PMAP_CACHE_VIPT
	    | ((src_pg->mdpage.pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
#endif
#ifdef PMAP_CACHE_VIVT
	    | pte_l2_s_cache_mode
#endif
	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
	*dst_ptep = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	PTE_SYNC(src_ptep);
	PTE_SYNC(dst_ptep);
	cpu_tlb_flushD_SE(csrcp + src_va_offset);
	cpu_tlb_flushD_SE(cdstp + dst_va_offset);
	cpu_cpwait();
	bcopy_page(csrcp + src_va_offset, cdstp + dst_va_offset);
#ifdef PMAP_CACHE_VIVT
	cpu_dcache_inv_range(csrcp + src_va_offset, PAGE_SIZE);
#endif
	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
#ifdef PMAP_CACHE_VIVT
	cpu_dcache_wbinv_range(cdstp + dst_va_offset, PAGE_SIZE);
#endif
	/*
	 * Unmap the pages.
	 */
	*src_ptep = 0;
	*dst_ptep = 0;
	PTE_SYNC(src_ptep);
	PTE_SYNC(dst_ptep);
	cpu_tlb_flushD_SE(csrcp + src_va_offset);
	cpu_tlb_flushD_SE(cdstp + dst_va_offset);
#ifdef PMAP_CACHE_VIPT
	/*
	 * Now that the destination page is in the cache, mark it as colored.
	 * If this was an exec page, discard it.
	 */
	if (!pmap_is_page_colored_p(dst_pg)) {
		PMAPCOUNT(vac_color_new);
		dst_pg->mdpage.pvh_attrs |= PVF_COLORED;
	}
	if (PV_IS_EXEC_P(dst_pg->mdpage.pvh_attrs)) {
		dst_pg->mdpage.pvh_attrs &= ~PVF_EXEC;
		PMAPCOUNT(exec_discarded_copy);
	}
	dst_pg->mdpage.pvh_attrs |= PVF_DIRTY;
#endif
}
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */
#if ARM_MMU_XSCALE == 1
void
pmap_copy_page_xscale(paddr_t src, paddr_t dst)
{
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#ifdef DEBUG
	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);

	if (!SLIST_EMPTY(&dst_pg->mdpage.pvh_list))
		panic("pmap_copy_page: dst page has mappings");
#endif

	KDASSERT((src & PGOFSET) == 0);
	KDASSERT((dst & PGOFSET) == 0);

	/*
	 * Clean the source page.  Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
	simple_lock(&src_pg->mdpage.pvh_slock);
#ifdef PMAP_CACHE_VIVT
	(void) pmap_clean_page(SLIST_FIRST(&src_pg->mdpage.pvh_list), true);
#endif

	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page. Invalidate the TLB
	 * as required.
	 */
	*csrc_pte = L2_S_PROTO | src |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XS_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XS_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
	xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */
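
/*
 * Sketch (not compiled): the MMU-class-specific zero/copy routines
 * above are not called directly.  They are reached through the
 * pmap_zero_page_func/pmap_copy_page_func pointers (declared with the
 * other PTE-initialization variables below), which the matching
 * pmap_pte_init_*() routine fills in at CPU-identification time.
 */
#if 0	/* example only */
static void
example_zero_and_copy(paddr_t src, paddr_t dst)
{
	(*pmap_zero_page_func)(dst);		/* zero one physical page */
	(*pmap_copy_page_func)(src, dst);	/* copy src into dst */
}
#endif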
/*
 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
 *
 * Return the start and end addresses of the kernel's virtual space.
 * These values are setup in pmap_bootstrap and are updated as pages
 * are allocated.
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	*start = virtual_avail;
	*end = virtual_end;
}
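
/*
 * Sketch (not compiled): a typical consumer of pmap_virtual_space().
 * UVM calls it once at startup to learn the managed KVA range.
 */
#if 0	/* example only */
static void
example_virtual_space(void)
{
	vaddr_t kva_start, kva_end;

	pmap_virtual_space(&kva_start, &kva_end);
	printf("managed KVA: 0x%lx - 0x%lx\n", kva_start, kva_end);
}
#endif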
/*
 * Helper function for pmap_grow_l2_bucket()
 */
static int
pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
{
	struct l2_bucket *l2b;
	struct vm_page *pg;
	pt_entry_t *ptep;
	paddr_t pa;

	if (uvm.page_init_done == false) {
#ifdef PMAP_STEAL_MEMORY
		pv_addr_t pv;
		pmap_boot_pagealloc(PAGE_SIZE,
#ifdef PMAP_CACHE_VIPT
		    arm_cache_prefer_mask,
		    va & arm_cache_prefer_mask,
#else
		    0, 0,
#endif
		    &pv);
		pa = pv.pv_pa;
#else
		if (uvm_page_physget(&pa) == false)
			return (1);
#endif	/* PMAP_STEAL_MEMORY */
	} else {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
		if (pg == NULL)
			return (1);
		pa = VM_PAGE_TO_PHYS(pg);
#ifdef PMAP_CACHE_VIPT
		/*
		 * This new page must not have any mappings.  Enter it via
		 * pmap_kenter_pa and let that routine do the hard work.
		 */
		KASSERT(SLIST_EMPTY(&pg->mdpage.pvh_list));
		pmap_kenter_pa(va, pa,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE, 0);
#endif
	}

	if (pap)
		*pap = pa;

	PMAPCOUNT(pt_mappings);
	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
	KDASSERT(l2b != NULL);

	ptep = &l2b->l2b_kva[l2pte_index(va)];
	*ptep = L2_S_PROTO | pa | cache_mode |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
	PTE_SYNC(ptep);
	memset((void *)va, 0, PAGE_SIZE);
	return (0);
}
/*
 * This is the same as pmap_alloc_l2_bucket(), except that it is only
 * used by pmap_growkernel().
 */
static inline struct l2_bucket *
pmap_grow_l2_bucket(pmap_t pm, vaddr_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;
	vaddr_t nva;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		nva = pmap_kernel_l2dtable_kva;
		if ((nva & PGOFSET) == 0) {
			/*
			 * Need to allocate a backing page
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
				return (NULL);
		}

		l2 = (struct l2_dtable *)nva;
		nva += sizeof(struct l2_dtable);

		if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) {
			/*
			 * The new l2_dtable straddles a page boundary.
			 * Map in another page to cover it.
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
				return (NULL);
		}

		pmap_kernel_l2dtable_kva = nva;

		/*
		 * Link it into the parent pmap
		 */
		pm->pm_l2[L2_IDX(l1idx)] = l2;
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		nva = pmap_kernel_l2ptp_kva;
		ptep = (pt_entry_t *)nva;
		if ((nva & PGOFSET) == 0) {
			/*
			 * Need to allocate a backing page
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
			    &pmap_kernel_l2ptp_phys))
				return (NULL);
			PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
		}

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
		l2b->l2b_phys = pmap_kernel_l2ptp_phys;

		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
	}

	return (l2b);
}
vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	pmap_t kpm = pmap_kernel();
	struct l1_ttable *l1;
	struct l2_bucket *l2b;
	pd_entry_t *pl1pd;
	int s;

	if (maxkvaddr <= pmap_curmaxkvaddr)
		goto out;		/* we are OK */

	NPDEBUG(PDB_GROWKERN,
	    printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
	    pmap_curmaxkvaddr, maxkvaddr));

	KDASSERT(maxkvaddr <= virtual_end);

	/*
	 * whoops!   we need to add kernel PTPs
	 */

	s = splhigh();	/* to be safe */
	mutex_enter(&kpm->pm_lock);

	/* Map 1MB at a time */
	for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) {

		l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
		KDASSERT(l2b != NULL);

		/* Distribute new L1 entry to all other L1s */
		SLIST_FOREACH(l1, &l1_list, l1_link) {
			pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)];
			*pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
			    L1_C_PROTO;
			PTE_SYNC(pl1pd);
		}
	}

	/*
	 * flush out the cache, expensive but growkernel will happen so
	 * rarely
	 */
	cpu_dcache_wbinv_all();
	cpu_tlb_flushD();
	cpu_cpwait();

	mutex_exit(&kpm->pm_lock);
	splx(s);

out:
	return (pmap_curmaxkvaddr);
}
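
/*
 * Sketch (not compiled): pmap_growkernel() grows managed kernel VA in
 * whole 1MB L1 sections, so callers may be given more than they asked
 * for; the returned value is the new pmap_curmaxkvaddr.
 */
#if 0	/* example only */
static void
example_growkernel(void)
{
	/* request two pages; the pmap grows by whole L1_S_SIZE steps */
	vaddr_t newmax;

	newmax = pmap_growkernel(pmap_curmaxkvaddr + 2 * PAGE_SIZE);
	printf("kernel VA now managed up to 0x%lx\n", newmax);
}
#endif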
/************************ Utility routines ****************************/

/*
 * vector_page_setprot:
 *
 *	Manipulate the protection of the vector page.
 */
void
vector_page_setprot(int prot)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;

	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
	KDASSERT(l2b != NULL);

	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];

	*ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(vector_page);
	cpu_cpwait();
}
/*
 * pmap_get_pde_pte:
 *
 *	Fetch pointers to the PDE/PTE for the given pmap/VA pair.
 *	Returns true if the mapping exists, else false.
 *
 *	NOTE: This function is only used by a couple of arm-specific modules.
 *	It is not safe to take any pmap locks here, since we could be right
 *	in the middle of debugging the pmap anyway...
 *
 *	It is possible for this routine to return false even though a valid
 *	mapping does exist. This is because we don't lock, so the metadata
 *	state may be inconsistent.
 *
 *	NOTE: We can return a NULL *ptp in the case where the L1 pde is
 *	a "section" mapping.
 */
bool
pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;

	if (pm->pm_l1 == NULL)
		return false;

	l1idx = L1_IDX(va);
	*pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
	l1pd = *pl1pd;

	if (l1pte_section_p(l1pd)) {
		*ptp = NULL;
		return true;
	}

	if (pm->pm_l2 == NULL)
		return false;

	l2 = pm->pm_l2[L2_IDX(l1idx)];

	if (l2 == NULL ||
	    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
		return false;
	}

	*ptp = &ptep[l2pte_index(va)];
	return true;
}
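
/*
 * Sketch (not compiled): how a debugger-like consumer might use
 * pmap_get_pde_pte(), remembering that *ptp comes back NULL for
 * section mappings and that a false return may be a transient
 * artifact of the lockless lookup.
 */
#if 0	/* example only */
static void
example_get_pde_pte(pmap_t pm, vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (!pmap_get_pde_pte(pm, va, &pdep, &ptep)) {
		printf("0x%lx: no mapping visible\n", va);
		return;
	}
	if (ptep == NULL)
		printf("0x%lx: section mapping, pde=0x%08x\n", va, *pdep);
	else
		printf("0x%lx: pde=0x%08x pte=0x%08x\n", va, *pdep, *ptep);
}
#endif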
bool
pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp)
{
	u_short l1idx;

	if (pm->pm_l1 == NULL)
		return false;

	l1idx = L1_IDX(va);
	*pdp = &pm->pm_l1->l1_kva[l1idx];

	return true;
}
/************************ Bootstrapping routines ****************************/

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
	int i;

	l1->l1_kva = l1pt;
	l1->l1_domain_use_count = 0;
	l1->l1_domain_first = 0;

	for (i = 0; i < PMAP_DOMAINS; i++)
		l1->l1_domain_free[i] = i + 1;

	/*
	 * Copy the kernel's L1 entries to each new L1.
	 */
	if (pmap_initialized)
		memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);

	if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt,
	    &l1->l1_physaddr) == false)
		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);

	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}
/*
 * pmap_bootstrap() is called from the board-specific initarm() routine
 * once the kernel L1/L2 descriptors tables have been set up.
 *
 * This is a somewhat convoluted process since pmap bootstrap is, effectively,
 * spread over a number of disparate files/functions.
 *
 * We are passed the following parameters
 *  - kernel_l1pt
 *    This is a pointer to the base of the kernel's L1 translation table.
 *  - vstart
 *    1MB-aligned start of managed kernel virtual memory.
 *  - vend
 *    1MB-aligned end of managed kernel virtual memory.
 *
 * We use the first parameter to build the metadata (struct l1_ttable and
 * struct l2_dtable) necessary to track kernel mappings.
 */
#define	PMAP_STATIC_L2_SIZE 16
void
pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
{
	static struct l1_ttable static_l1;
	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
	struct l1_ttable *l1 = &static_l1;
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va;
	pmap_t pm = pmap_kernel();
	pd_entry_t pde;
	pt_entry_t *ptep;
	paddr_t pa;
	vaddr_t va;
	vsize_t size;
	int nptes, l1idx, l2idx, l2next = 0;

	/*
	 * Initialise the kernel pmap object
	 */
	pm->pm_l1 = l1;
	pm->pm_domain = PMAP_DOMAIN_KERNEL;
	pm->pm_activated = true;
	pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
	UVM_OBJ_INIT(&pm->pm_obj, NULL, 1);

	/*
	 * Scan the L1 translation table created by initarm() and create
	 * the required metadata for all valid mappings found in it.
	 */
	for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
		pde = l1pt[l1idx];

		/*
		 * We're only interested in Coarse mappings.
		 * pmap_extract() can deal with section mappings without
		 * recourse to checking L2 metadata.
		 */
		if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
			continue;

		/*
		 * Lookup the KVA of this L2 descriptor table
		 */
		pa = (paddr_t)(pde & L1_C_ADDR_MASK);
		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
		if (ptep == NULL) {
			panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
			    (u_int)l1idx << L1_S_SHIFT, pa);
		}

		/*
		 * Fetch the associated L2 metadata structure.
		 * Allocate a new one if necessary.
		 */
		if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
			if (l2next == PMAP_STATIC_L2_SIZE)
				panic("pmap_bootstrap: out of static L2s");
			pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++];
		}

		/*
		 * One more L1 slot tracked...
		 */
		l2->l2_occupancy++;

		/*
		 * Fill in the details of the L2 descriptor in the
		 * appropriate bucket.
		 */
		l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
		l2b->l2b_kva = ptep;
		l2b->l2b_phys = pa;
		l2b->l2b_l1idx = l1idx;

		/*
		 * Establish an initial occupancy count for this descriptor
		 */
		for (l2idx = 0;
		    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
		    l2idx++) {
			if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
				l2b->l2b_occupancy++;
			}
		}

		/*
		 * Make sure the descriptor itself has the correct cache mode.
		 * If not, fix it, but whine about the problem. Port-meisters
		 * should consider this a clue to fix up their initarm()
		 * function. :)
		 */
		if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep)) {
			printf("pmap_bootstrap: WARNING! wrong cache mode for "
			    "L2 pte @ %p\n", ptep);
		}
	}

	/*
	 * Ensure the primary (kernel) L1 has the correct cache mode for
	 * a page table. Bitch if it is not correctly set.
	 */
	for (va = (vaddr_t)l1pt;
	    va < ((vaddr_t)l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
		if (pmap_set_pt_cache_mode(l1pt, va))
			printf("pmap_bootstrap: WARNING! wrong cache mode for "
			    "primary L1 @ 0x%lx\n", va);
	}

	cpu_dcache_wbinv_all();
	cpu_tlb_flushID();
	cpu_cpwait();

	/*
	 * now we allocate the "special" VAs which are used for tmp mappings
	 * by the pmap (and other modules).  we allocate the VAs by advancing
	 * virtual_avail (note that there are no pages mapped at these VAs).
	 *
	 * Managed KVM space start from wherever initarm() tells us.
	 */
	virtual_avail = vstart;
	virtual_end = vend;

#ifdef PMAP_CACHE_VIPT
	/*
	 * If we have a VIPT cache, we need one page/pte per possible alias
	 * page so we won't violate cache aliasing rules.
	 */
	virtual_avail = (virtual_avail + arm_cache_prefer_mask) & ~arm_cache_prefer_mask;
	nptes = (arm_cache_prefer_mask >> PGSHIFT) + 1;
#else
	nptes = 1;
#endif
	pmap_alloc_specials(&virtual_avail, nptes, &csrcp, &csrc_pte);
	pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte);
	pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte);
	pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte);
	pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL);
	pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE,
	    (void *)&msgbufaddr, NULL);

	/*
	 * Allocate a range of kernel virtual address space to be used
	 * for L2 descriptor tables and metadata allocation in
	 * pmap_growkernel().
	 */
	size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
	pmap_alloc_specials(&virtual_avail,
	    round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
	    &pmap_kernel_l2ptp_kva, NULL);

	size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
	pmap_alloc_specials(&virtual_avail,
	    round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
	    &pmap_kernel_l2dtable_kva, NULL);

	/*
	 * init the static-global locks and global pmap list.
	 */
	/* spinlockinit(&pmap_main_lock, "pmaplk", 0); */

	/*
	 * We can now initialise the first L1's metadata.
	 */
	SLIST_INIT(&l1_list);
	TAILQ_INIT(&l1_lru_list);
	simple_lock_init(&l1_lru_lock);
	pmap_init_l1(l1, l1pt);

	/* Set up vector page L1 details, if necessary */
	if (vector_page < KERNEL_BASE) {
		pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
		l2b = pmap_get_l2_bucket(pm, vector_page);
		KDASSERT(l2b != NULL);
		pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
		    L1_C_DOM(pm->pm_domain);
	} else
		pm->pm_pl1vec = NULL;

	/*
	 * Initialize the pmap cache
	 */
	pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
	    "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL);
	LIST_INIT(&pmap_pmaps);
	LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);

	/*
	 * Initialize the pv pool.
	 */
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl",
	    &pmap_bootstrap_pv_allocator, IPL_NONE);

	/*
	 * Initialize the L2 dtable pool and cache.
	 */
	pool_cache_bootstrap(&pmap_l2dtable_cache, sizeof(struct l2_dtable), 0,
	    0, 0, "l2dtblpl", NULL, IPL_NONE, pmap_l2dtable_ctor, NULL, NULL);

	/*
	 * Initialise the L2 descriptor table pool and cache
	 */
	pool_cache_bootstrap(&pmap_l2ptp_cache, L2_TABLE_SIZE_REAL, 0,
	    L2_TABLE_SIZE_REAL, 0, "l2ptppl", NULL, IPL_NONE,
	    pmap_l2ptp_ctor, NULL, NULL);

	cpu_dcache_wbinv_all();
}
static int
pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va)
{
	pd_entry_t *pdep, pde;
	pt_entry_t *ptep, pte;
	paddr_t pa;
	int rv = 0;

	/*
	 * Make sure the descriptor itself has the correct cache mode
	 */
	pdep = &kl1[L1_IDX(va)];
	pde = *pdep;

	if (l1pte_section_p(pde)) {
		if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
			*pdep = (pde & ~L1_S_CACHE_MASK) |
			    pte_l1_s_cache_mode_pt;
			PTE_SYNC(pdep);
			cpu_dcache_wbinv_range((vaddr_t)pdep, sizeof(*pdep));
			rv = 1;
		}
	} else {
		pa = (paddr_t)(pde & L1_C_ADDR_MASK);
		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
		if (ptep == NULL)
			panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);

		ptep = &ptep[l2pte_index(va)];
		pte = *ptep;
		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
			*ptep = (pte & ~L2_S_CACHE_MASK) |
			    pte_l2_s_cache_mode_pt;
			PTE_SYNC(ptep);
			cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep));
			rv = 1;
		}
	}

	return (rv);
}
static void
pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep)
{
	vaddr_t va = *availp;
	struct l2_bucket *l2b;

	if (ptep) {
		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		if (l2b == NULL)
			panic("pmap_alloc_specials: no l2b for 0x%lx", va);

		*ptep = &l2b->l2b_kva[l2pte_index(va)];
	}

	*vap = va;
	*availp = va + (PAGE_SIZE * pages);
}
void
pmap_init(void)
{

	/*
	 * Set the available memory vars - These do not map to real memory
	 * addresses and cannot as the physical memory is fragmented.
	 * They are used by ps for %mem calculations.
	 * One could argue whether this should be the entire memory or just
	 * the memory that is useable in a user process.
	 */
	avail_start = ptoa(vm_physmem[0].start);
	avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);

	/*
	 * Now we need to free enough pv_entry structures to allow us to get
	 * the kmem_map/kmem_object allocated and inited (done after this
	 * function is finished).  to do this we allocate one bootstrap page
	 * out of kernel_map and use it to provide an initial pool of pv_entry
	 * structures.  we never free this page.
	 */
	pool_setlowat(&pmap_pv_pool,
	    (PAGE_SIZE / sizeof(struct pv_entry)) * 2);

	mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE);
	zeropage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED|UVM_KMF_ZERO);

	pmap_initialized = true;
}
static vaddr_t last_bootstrap_page = 0;
static void *free_bootstrap_pages = NULL;

static void *
pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags)
{
	extern void *pool_page_alloc(struct pool *, int);
	vaddr_t new_page;
	void *rv;

	if (pmap_initialized)
		return (pool_page_alloc(pp, flags));

	if (free_bootstrap_pages) {
		rv = free_bootstrap_pages;
		free_bootstrap_pages = *((void **)rv);
		return (rv);
	}

	new_page = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT));

	KASSERT(new_page > last_bootstrap_page);
	last_bootstrap_page = new_page;
	return ((void *)new_page);
}

static void
pmap_bootstrap_pv_page_free(struct pool *pp, void *v)
{
	extern void pool_page_free(struct pool *, void *);

	if ((vaddr_t)v <= last_bootstrap_page) {
		*((void **)v) = free_bootstrap_pages;
		free_bootstrap_pages = v;
		return;
	}

	if (pmap_initialized) {
		pool_page_free(pp, v);
		return;
	}
}

static struct pool_allocator pmap_bootstrap_pv_allocator = {
	pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
};
/*
 * pmap_postinit()
 *
 * This routine is called after the vm and kmem subsystems have been
 * initialised. This allows the pmap code to perform any initialisation
 * that can only be done once the memory allocation is in place.
 */
void
pmap_postinit(void)
{
	extern paddr_t physical_start, physical_end;
	struct l2_bucket *l2b;
	struct l1_ttable *l1;
	struct pglist plist;
	struct vm_page *m;
	pd_entry_t *pl1pt;
	pt_entry_t *ptep, pte;
	vaddr_t va, eva;
	u_int loop, needed;
	int error;

	pool_cache_setlowat(&pmap_l2ptp_cache,
	    (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4);
	pool_cache_setlowat(&pmap_l2dtable_cache,
	    (PAGE_SIZE / sizeof(struct l2_dtable)) * 2);

	needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
	needed -= 1;

	l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);

	for (loop = 0; loop < needed; loop++, l1++) {
		/* Allocate a L1 page table */
		va = uvm_km_alloc(kernel_map, L1_TABLE_SIZE, 0, UVM_KMF_VAONLY);
		if (va == 0)
			panic("Cannot allocate L1 KVM");

		error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start,
		    physical_end, L1_TABLE_SIZE, 0, &plist, 1, M_WAITOK);
		if (error)
			panic("Cannot allocate L1 physical pages");

		m = TAILQ_FIRST(&plist);
		eva = va + L1_TABLE_SIZE;
		pl1pt = (pd_entry_t *)va;

		while (m && va < eva) {
			paddr_t pa = VM_PAGE_TO_PHYS(m);

			pmap_kenter_pa(va, pa,
			    VM_PROT_READ|VM_PROT_WRITE|PMAP_KMPAGE, 0);

			/*
			 * Make sure the L1 descriptor table is mapped
			 * with the cache-mode set to write-through.
			 */
			l2b = pmap_get_l2_bucket(pmap_kernel(), va);
			KDASSERT(l2b != NULL);
			ptep = &l2b->l2b_kva[l2pte_index(va)];
			pte = *ptep;
			pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
			*ptep = pte;
			PTE_SYNC(ptep);
			cpu_tlb_flushD_SE(va);

			va += PAGE_SIZE;
			m = TAILQ_NEXT(m, pageq.queue);
		}

#ifdef DIAGNOSTIC
		if (m)
			panic("pmap_alloc_l1pt: pglist not empty");
#endif	/* DIAGNOSTIC */

		pmap_init_l1(l1, pl1pt);
	}

#ifdef DEBUG
	printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
	    needed);
#endif
}
/*
 * Note that the following routines are used by board-specific initialisation
 * code to configure the initial kernel page tables.
 *
 * If ARM32_NEW_VM_LAYOUT is *not* defined, they operate on the assumption that
 * L2 page-table pages are 4KB in size and use 4 L1 slots. This mimics the
 * behaviour of the old pmap, and provides an easy migration path for
 * initial bring-up of the new pmap on existing ports. Fortunately,
 * pmap_bootstrap() compensates for this hackery. This is only a stop-gap and
 * will be deprecated.
 *
 * If ARM32_NEW_VM_LAYOUT *is* defined, these functions deal with 1KB L2 page
 * tables.
 */

/*
 * This list exists for the benefit of pmap_map_chunk().  It keeps track
 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
 * find them as necessary.
 *
 * Note that the data on this list MUST remain valid after initarm() returns,
 * as pmap_bootstrap() uses it to construct L2 table metadata.
 */
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);

static vaddr_t
kernel_pt_lookup(paddr_t pa)
{
	pv_addr_t *pv;

	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
#ifndef ARM32_NEW_VM_LAYOUT
		if (pv->pv_pa == (pa & ~PGOFSET))
			return (pv->pv_va | (pa & PGOFSET));
#else
		if (pv->pv_pa == pa)
			return (pv->pv_va);
#endif
	}
	return (0);
}
/*
 * pmap_map_section:
 *
 *	Create a single section mapping.
 */
void
pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pd_entry_t fl;

	KASSERT(((va | pa) & L1_S_OFFSET) == 0);

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l1_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l1_s_cache_mode_pt;
		break;
	}

	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	PTE_SYNC(&pde[va >> L1_S_SHIFT]);
}
/*
 * pmap_map_entry:
 *
 *	Create a single page mapping.
 */
void
pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t fl;
	pt_entry_t *pte;

	KASSERT(((va | pa) & PGOFSET) == 0);

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l2_s_cache_mode_pt;
		break;
	}

	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
		panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);

#ifndef ARM32_NEW_VM_LAYOUT
	pte = (pt_entry_t *)
	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
	if (pte == NULL)
		panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);

	fl |= L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
#ifndef ARM32_NEW_VM_LAYOUT
	pte += (va >> PGSHIFT) & 0x3ff;
#else
	pte += l2pte_index(va);
#endif
	*pte = fl;
	PTE_SYNC(pte);
}
/*
 * pmap_link_l2pt:
 *
 *	Link the L2 page table specified by "l2pv" into the L1
 *	page table at the slot for "va".
 */
void
pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
	u_int slot = va >> L1_S_SHIFT;

#ifndef ARM32_NEW_VM_LAYOUT
	KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0);
#endif
	KASSERT((l2pv->pv_pa & PGOFSET) == 0);

	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;

	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
#ifdef ARM32_NEW_VM_LAYOUT
	PTE_SYNC(&pde[slot]);
#else
	pde[slot + 1] = proto | (l2pv->pv_pa + 0x400);
	pde[slot + 2] = proto | (l2pv->pv_pa + 0x800);
	pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00);
	PTE_SYNC_RANGE(&pde[slot + 0], 4);
#endif

	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}
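
/*
 * Sketch (not compiled): typical initarm() usage of pmap_link_l2pt().
 * A pv_addr_t describing a freshly allocated L2 table (the argument
 * name here is hypothetical) is linked under the L1 slot covering
 * KERNEL_BASE, which also places it on kernel_pt_list for later
 * kernel_pt_lookup() calls.
 */
#if 0	/* example only */
static void
example_link_l2pt(vaddr_t l1pt_va, pv_addr_t *kernel_pt)
{
	pmap_link_l2pt(l1pt_va, KERNEL_BASE, kernel_pt);
}
#endif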
/*
 * pmap_map_chunk:
 *
 *	Map a chunk of memory using the most efficient mappings
 *	possible (section, large page, small page) into the
 *	provided L1 and L2 tables at the specified virtual address.
 */
vsize_t
pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
    int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte, f1, f2s, f2l;
	vsize_t resid;
	int i;

	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

	if (l1pt == 0)
		panic("pmap_map_chunk: no L1 table provided");

#ifdef VERBOSE_INIT_ARM
	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
#endif

	switch (cache) {
	case PTE_NOCACHE:
	default:
		f1 = 0;
		f2l = 0;
		f2s = 0;
		break;

	case PTE_CACHE:
		f1 = pte_l1_s_cache_mode;
		f2l = pte_l2_l_cache_mode;
		f2s = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		f1 = pte_l1_s_cache_mode_pt;
		f2l = pte_l2_l_cache_mode_pt;
		f2s = pte_l2_s_cache_mode_pt;
		break;
	}

	size = resid;

	while (resid > 0) {
		/* See if we can use a section mapping. */
		if (L1_S_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("S");
#endif
			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
			va += L1_S_SIZE;
			pa += L1_S_SIZE;
			resid -= L1_S_SIZE;
			continue;
		}

		/*
		 * Ok, we're going to use an L2 table.  Make sure
		 * one is actually in the corresponding L1 slot
		 * for the current VA.
		 */
		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
			panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);

#ifndef ARM32_NEW_VM_LAYOUT
		pte = (pt_entry_t *)
		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
		if (pte == NULL)
			panic("pmap_map_chunk: can't find L2 table for VA"
			    " 0x%08lx", va);

		/* See if we can use a L2 large page mapping. */
		if (L2_L_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("L");
#endif
			for (i = 0; i < 16; i++) {
#ifndef ARM32_NEW_VM_LAYOUT
				pte[((va >> PGSHIFT) & 0x3f0) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[((va >> PGSHIFT) & 0x3f0) + i]);
#else
				pte[l2pte_index(va) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[l2pte_index(va) + i]);
#endif
			}
			va += L2_L_SIZE;
			pa += L2_L_SIZE;
			resid -= L2_L_SIZE;
			continue;
		}

		/* Use a small page mapping. */
#ifdef VERBOSE_INIT_ARM
		printf("P");
#endif
#ifndef ARM32_NEW_VM_LAYOUT
		pte[(va >> PGSHIFT) & 0x3ff] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]);
#else
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[l2pte_index(va)]);
#endif
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		resid -= PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("\n");
#endif
	return (size);
}
/********************** Static device map routines ***************************/

static const struct pmap_devmap *pmap_devmap_table;

/*
 * Register the devmap table.  This is provided in case early console
 * initialization needs to register mappings created by bootstrap code
 * before pmap_devmap_bootstrap() is called.
 */
void
pmap_devmap_register(const struct pmap_devmap *table)
{

	pmap_devmap_table = table;
}

/*
 * Map all of the static regions in the devmap table, and remember
 * the devmap table so other parts of the kernel can look up entries
 * later.
 */
void
pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table)
{
	int i;

	pmap_devmap_table = table;

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
#ifdef VERBOSE_INIT_ARM
		printf("devmap: %08lx -> %08lx @ %08lx\n",
		    pmap_devmap_table[i].pd_pa,
		    pmap_devmap_table[i].pd_pa +
			pmap_devmap_table[i].pd_size - 1,
		    pmap_devmap_table[i].pd_va);
#endif
		pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
		    pmap_devmap_table[i].pd_pa,
		    pmap_devmap_table[i].pd_size,
		    pmap_devmap_table[i].pd_prot,
		    pmap_devmap_table[i].pd_cache);
	}
}

const struct pmap_devmap *
pmap_devmap_find_pa(paddr_t pa, psize_t size)
{
	uint64_t endpa;
	int i;

	if (pmap_devmap_table == NULL)
		return (NULL);

	endpa = (uint64_t)pa + (uint64_t)(size - 1);

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
		if (pa >= pmap_devmap_table[i].pd_pa &&
		    endpa <= (uint64_t)pmap_devmap_table[i].pd_pa +
			     (uint64_t)(pmap_devmap_table[i].pd_size - 1))
			return (&pmap_devmap_table[i]);
	}

	return (NULL);
}

const struct pmap_devmap *
pmap_devmap_find_va(vaddr_t va, vsize_t size)
{
	int i;

	if (pmap_devmap_table == NULL)
		return (NULL);

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
		if (va >= pmap_devmap_table[i].pd_va &&
		    va + size - 1 <= pmap_devmap_table[i].pd_va +
				     pmap_devmap_table[i].pd_size - 1)
			return (&pmap_devmap_table[i]);
	}

	return (NULL);
}
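
/*
 * Sketch (not compiled): the shape of a board's devmap table.  The
 * array must end with a zero pd_size entry, which is what the loops
 * above key off; the addresses below are made up for illustration.
 */
#if 0	/* example only */
static const struct pmap_devmap example_devmap[] = {
	{
		.pd_va = 0xfd000000,		/* hypothetical UART window */
		.pd_pa = 0x40100000,
		.pd_size = L1_S_SIZE,
		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,
		.pd_cache = PTE_NOCACHE,
	},
	{ 0, 0, 0, 0, 0 }			/* required terminator */
};
/* initarm() would call pmap_devmap_bootstrap(l1pt_va, example_devmap). */
#endif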
/********************** PTE initialization routines **************************/

/*
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
 * The variables are always here, just in case modules need to reference
 * them (though, they shouldn't).
 */

pt_entry_t	pte_l1_s_cache_mode;
pt_entry_t	pte_l1_s_cache_mode_pt;
pt_entry_t	pte_l1_s_cache_mask;

pt_entry_t	pte_l2_l_cache_mode;
pt_entry_t	pte_l2_l_cache_mode_pt;
pt_entry_t	pte_l2_l_cache_mask;

pt_entry_t	pte_l2_s_cache_mode;
pt_entry_t	pte_l2_s_cache_mode_pt;
pt_entry_t	pte_l2_s_cache_mask;

pt_entry_t	pte_l2_s_prot_u;
pt_entry_t	pte_l2_s_prot_w;
pt_entry_t	pte_l2_s_prot_mask;

pt_entry_t	pte_l1_s_proto;
pt_entry_t	pte_l1_c_proto;
pt_entry_t	pte_l2_s_proto;

void		(*pmap_copy_page_func)(paddr_t, paddr_t);
void		(*pmap_zero_page_func)(paddr_t);

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
void
pmap_pte_init_generic(void)
{

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;

	/*
	 * If we have a write-through cache, set B and C.  If
	 * we have a write-back cache, then we assume setting
	 * only C will make those pages write-through.
	 */
	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
		pte_l2_l_cache_mode_pt = L2_B|L2_C;
		pte_l2_s_cache_mode_pt = L2_B|L2_C;
	} else {
#if ARM_MMU_V6 > 1
		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; /* arm1136 errata 399234 */
		pte_l2_l_cache_mode_pt = L2_B|L2_C;	/* arm1136 errata 399234 */
		pte_l2_s_cache_mode_pt = L2_B|L2_C;	/* arm1136 errata 399234 */
#else
		pte_l1_s_cache_mode_pt = L1_S_C;
		pte_l2_l_cache_mode_pt = L2_C;
		pte_l2_s_cache_mode_pt = L2_C;
#endif
	}

	pte_l2_s_prot_u = L2_S_PROT_U_generic;
	pte_l2_s_prot_w = L2_S_PROT_W_generic;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;

	pte_l1_s_proto = L1_S_PROTO_generic;
	pte_l1_c_proto = L1_C_PROTO_generic;
	pte_l2_s_proto = L2_S_PROTO_generic;

	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
}
#if defined(CPU_ARM8)
void
pmap_pte_init_arm8(void)
{

	/*
	 * ARM8 is compatible with generic, but we need to use
	 * the page tables uncached.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = 0;
	pte_l2_l_cache_mode_pt = 0;
	pte_l2_s_cache_mode_pt = 0;
}
#endif /* CPU_ARM8 */

#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
void
pmap_pte_init_arm9(void)
{

	/*
	 * ARM9 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_C;
	pte_l2_l_cache_mode = L2_C;
	pte_l2_s_cache_mode = L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM9 && ARM9_CACHE_WRITE_THROUGH */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */

#if defined(CPU_ARM10)
void
pmap_pte_init_arm10(void)
{

	/*
	 * ARM10 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_B | L1_S_C;
	pte_l2_l_cache_mode = L2_B | L2_C;
	pte_l2_s_cache_mode = L2_B | L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11) && defined(ARM11_CACHE_WRITE_THROUGH)
void
pmap_pte_init_arm11(void)
{

	/*
	 * ARM11 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_C;
	pte_l2_l_cache_mode = L2_C;
	pte_l2_s_cache_mode = L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM11 && ARM11_CACHE_WRITE_THROUGH */

#if ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{

	/*
	 * The StrongARM SA-1 cache does not have a write-through
	 * mode.  So, do the generic initialization, then reset
	 * the page table cache mode to B=1,C=1, and note that
	 * the PTEs need to be sync'd.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
	pte_l2_l_cache_mode_pt = L2_B|L2_C;
	pte_l2_s_cache_mode_pt = L2_B|L2_C;

	pmap_needs_pte_sync = 1;
}
#endif /* ARM_MMU_SA1 == 1*/
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
static u_int xscale_use_minidata;
#endif

void
pmap_pte_init_xscale(void)
{
	uint32_t auxctl;
	int write_through = 0;

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;

#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
	/*
	 * The XScale core has an enhanced mode where writes that
	 * miss the cache cause a cache line to be allocated.  This
	 * is significantly faster than the traditional, write-through
	 * behavior of this case.
	 */
	pte_l1_s_cache_mode |= L1_S_XS_TEX(TEX_XSCALE_X);
	pte_l2_l_cache_mode |= L2_XS_L_TEX(TEX_XSCALE_X);
	pte_l2_s_cache_mode |= L2_XS_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */

#ifdef XSCALE_CACHE_WRITE_THROUGH
	/*
	 * Some versions of the XScale core have various bugs in
	 * their cache units, the work-around for which is to run
	 * the cache in write-through mode.  Unfortunately, this
	 * has a major (negative) impact on performance.  So, we
	 * go ahead and run fast-and-loose, in the hopes that we
	 * don't line up the planets in a way that will trip the
	 * bugs.
	 *
	 * However, we give you the option to be slow-but-correct.
	 */
	write_through = 1;
#elif defined(XSCALE_CACHE_WRITE_BACK)
	/* force write back cache mode */
	write_through = 0;
#elif defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270)
	/*
	 * Intel PXA2[15]0 processors are known to have a bug in
	 * write-back cache on revision 4 and earlier (stepping
	 * A[01] and B[012]).  Fixed for C0 and later.
	 */
	{
		uint32_t id, type;

		id = cpufunc_id();
		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
			if ((id & CPU_ID_REVISION_MASK) < 5) {
				/* write through for stepping A0-1 and B0-2 */
				write_through = 1;
			}
		}
	}
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	if (write_through) {
		pte_l1_s_cache_mode = L1_S_C;
		pte_l2_l_cache_mode = L2_C;
		pte_l2_s_cache_mode = L2_C;
	}

#if (ARM_NMMUS > 1)
	xscale_use_minidata = 1;
#endif

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	__asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl &= ~XSCALE_AUXCTL_P;
	__asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
void
xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
{
	extern vaddr_t xscale_minidata_clean_addr;
	extern vsize_t xscale_minidata_clean_size; /* already initialized */
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte;
	vsize_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round it to page size. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
#ifndef ARM32_NEW_VM_LAYOUT
		pte = (pt_entry_t *)
		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
		if (pte == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08lx", va);
#ifndef ARM32_NEW_VM_LAYOUT
		pte[(va >> PGSHIFT) & 0x3ff] =
#else
		pte[l2pte_index(va)] =
#endif
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
		    L2_C | L2_XS_T_TEX(TEX_XSCALE_X);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/* Invalidate data and mini-data. */
	__asm volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
	__asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	__asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
/*
 * Change the PTEs for the specified kernel mappings such that they
 * will use the mini data cache instead of the main data cache.
 */
void
pmap_uarea(vaddr_t va)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, *sptep, pte;
	vaddr_t next_bucket, eva;

#if (ARM_NMMUS > 1)
	if (xscale_use_minidata == 0)
		return;
#endif

	eva = va + USPACE;

	while (va < eva) {
		next_bucket = L2_NEXT_BUCKET(va);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		KDASSERT(l2b != NULL);

		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];

		while (va < next_bucket) {
			pte = *ptep;
			if (!l2pte_minidata(pte)) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_tlb_flushD_SE(va);
				*ptep = pte & ~L2_B;
			}
			ptep++;
			va += PAGE_SIZE;
		}
		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
	}
	cpu_cpwait();
}
#endif /* ARM_MMU_XSCALE == 1 */
/*
 * return the PA of the current L1 table, for use when handling a crash dump
 */
uint32_t
pmap_kernel_L1_addr(void)
{
	return pmap_kernel()->pm_l1->l1_physaddr;
}
/*
 * A couple of ddb-callable functions for dumping pmaps
 */
void pmap_dump_all(void);
void pmap_dump(pmap_t);

void
pmap_dump_all(void)
{
	pmap_t pm;

	LIST_FOREACH(pm, &pmap_pmaps, pm_list) {
		if (pm == pmap_kernel())
			continue;
		pmap_dump(pm);
		printf("\n");
	}
}

static pt_entry_t ncptes[64];
static void pmap_dump_ncpg(pmap_t);

void
pmap_dump(pmap_t pm)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vaddr_t l2_va, l2b_va, va;
	int i, j, k, occ, rows = 0;

	if (pm == pmap_kernel())
		printf("pmap_kernel (%p): ", pm);
	else
		printf("user pmap (%p): ", pm);

	printf("domain %d, l1 at %p\n", pm->pm_domain, pm->pm_l1->l1_kva);

	l2_va = 0;
	for (i = 0; i < L2_SIZE; i++, l2_va += 0x01000000) {
		l2 = pm->pm_l2[i];

		if (l2 == NULL || l2->l2_occupancy == 0)
			continue;

		l2b_va = l2_va;
		for (j = 0; j < L2_BUCKET_SIZE; j++, l2b_va += 0x00100000) {
			l2b = &l2->l2_bucket[j];

			if (l2b->l2b_occupancy == 0 || l2b->l2b_kva == NULL)
				continue;

			ptep = l2b->l2b_kva;

			for (k = 0; k < 256 && ptep[k] == 0; k++)
				;

			k &= ~63;
			occ = l2b->l2b_occupancy;
			va = l2b_va + (k * 4096);
			for (; k < 256; k++, va += 0x1000) {
				char ch = ' ';

				if ((k % 64) == 0) {
					if ((rows % 8) == 0) {
						printf(
"          |0000   |8000   |10000  |18000  |20000  |28000  |30000  |38000\n");
					}
					printf("%08lx: ", va);
				}

				ncptes[k & 63] = 0;
				pte = ptep[k];
				if (pte == 0) {
					ch = '.';
				} else {
					occ--;
					switch (pte & 0x0c) {
					case 0x00:
						ch = 'D'; /* No cache No buff */
						break;
					case 0x04:
						ch = 'B'; /* No cache buff */
						break;
					case 0x08:
						ch = 'C'; /* Cache No buff */
						break;
					case 0x0c:
						ch = 'F'; /* Cache Buff */
						break;
					}

					if ((pte & L2_S_PROT_U) == L2_S_PROT_U)
						ch += 0x20;

					if ((pte & 0xc) == 0)
						ncptes[k & 63] = pte;
				}

				if ((k % 64) == 63) {
					rows++;
					printf("%c\n", ch);
					pmap_dump_ncpg(pm);
					if (occ == 0)
						break;
				} else
					printf("%c", ch);
			}
		}
	}
}

static void
pmap_dump_ncpg(pmap_t pm)
{
	struct vm_page *pg;
	struct pv_entry *pv;
	int i;

	for (i = 0; i < 63; i++) {
		if (ncptes[i] == 0)
			continue;

		pg = PHYS_TO_VM_PAGE(l2pte_pa(ncptes[i]));
		if (pg == NULL)
			continue;

		printf(" pa 0x%08lx: krw %d kro %d urw %d uro %d\n",
		    VM_PAGE_TO_PHYS(pg),
		    pg->mdpage.krw_mappings, pg->mdpage.kro_mappings,
		    pg->mdpage.urw_mappings, pg->mdpage.uro_mappings);

		SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
			printf("   %c va 0x%08lx, flags 0x%x\n",
			    (pm == pv->pv_pmap) ? '*' : ' ',
			    pv->pv_va, pv->pv_flags);
		}
	}
}
#ifdef PMAP_STEAL_MEMORY
void
pmap_boot_pageadd(pv_addr_t *newpv)
{
	pv_addr_t *pv, *npv;

	if ((pv = SLIST_FIRST(&pmap_boot_freeq)) != NULL) {
		if (newpv->pv_pa < pv->pv_va) {
			KASSERT(newpv->pv_pa + newpv->pv_size <= pv->pv_pa);
			if (newpv->pv_pa + newpv->pv_size == pv->pv_pa) {
				newpv->pv_size += pv->pv_size;
				SLIST_REMOVE_HEAD(&pmap_boot_freeq, pv_list);
			}
			pv = NULL;
		} else {
			for (; (npv = SLIST_NEXT(pv, pv_list)) != NULL;
			     pv = npv) {
				KASSERT(pv->pv_pa + pv->pv_size < npv->pv_pa);
				KASSERT(pv->pv_pa < newpv->pv_pa);
				if (newpv->pv_pa > npv->pv_pa)
					continue;
				if (pv->pv_pa + pv->pv_size == newpv->pv_pa) {
					pv->pv_size += newpv->pv_size;
					return;
				}
				if (newpv->pv_pa + newpv->pv_size < npv->pv_pa)
					break;
				newpv->pv_size += npv->pv_size;
				SLIST_INSERT_AFTER(pv, newpv, pv_list);
				SLIST_REMOVE_AFTER(newpv, pv_list);
				return;
			}
		}
	}

	if (pv)
		SLIST_INSERT_AFTER(pv, newpv, pv_list);
	else
		SLIST_INSERT_HEAD(&pmap_boot_freeq, newpv, pv_list);
}
void
pmap_boot_pagealloc(psize_t amount, psize_t mask, psize_t match,
    pv_addr_t *rpv)
{
	pv_addr_t *pv, **pvp;
	pv_addr_t *newpv;
	struct vm_physseg *ps;
	psize_t off;
	int i;

	KASSERT((amount & PGOFSET) == 0);
	KASSERT((mask & PGOFSET) == 0);
	KASSERT((match & PGOFSET) == 0);
	KASSERT(amount != 0);

	for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
	     (pv = *pvp) != NULL;
	     pvp = &SLIST_NEXT(pv, pv_list)) {

		/*
		 * If this entry is too small to satisfy the request...
		 */
		KASSERT(pv->pv_size > 0);
		if (pv->pv_size < amount)
			continue;

		for (off = 0; off <= mask; off += PAGE_SIZE) {
			if (((pv->pv_pa + off) & mask) == match
			    && off + amount <= pv->pv_size)
				break;
		}
		if (off > mask)
			continue;

		rpv->pv_va = pv->pv_va + off;
		rpv->pv_pa = pv->pv_pa + off;
		rpv->pv_size = amount;
		pv->pv_size -= amount;
		if (pv->pv_size == 0) {
			KASSERT((vaddr_t) pv == rpv->pv_va);
			*pvp = SLIST_NEXT(pv, pv_list);
		} else if (off == 0) {
			KASSERT((vaddr_t) pv == rpv->pv_va);
			newpv = (pv_addr_t *) (rpv->pv_va + amount);
			*newpv = *pv;
			newpv->pv_pa += amount;
			newpv->pv_va += amount;
			*pvp = newpv;
		} else if (off < pv->pv_size) {
			newpv = (pv_addr_t *) (rpv->pv_va + amount);
			*newpv = *pv;
			newpv->pv_size -= off;
			newpv->pv_pa += off + amount;
			newpv->pv_va += off + amount;

			SLIST_NEXT(pv, pv_list) = newpv;
			pv->pv_size = off;
		} else {
			KASSERT((vaddr_t) pv != rpv->pv_va);
		}
		memset((void *)rpv->pv_va, 0, amount);
		return;
	}

	if (vm_nphysseg == 0)
		panic("pmap_boot_pagealloc: couldn't allocate memory");

	for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
	     (pv = *pvp) != NULL;
	     pvp = &SLIST_NEXT(pv, pv_list)) {
		if (SLIST_NEXT(pv, pv_list) == NULL)
			break;
	}
	for (ps = vm_physmem, i = 0; i < vm_nphysseg; ps++, i++) {
		if (ps->avail_start == atop(pv->pv_pa + pv->pv_size)
		    && pv->pv_va + pv->pv_size <= ptoa(ps->avail_end)) {
			rpv->pv_va = pv->pv_va;
			rpv->pv_pa = pv->pv_pa;
			rpv->pv_size = amount;
			*pvp = NULL;
			pmap_map_chunk(kernel_l1pt.pv_va,
			    ptoa(ps->avail_start) + (pv->pv_va - pv->pv_pa),
			    ptoa(ps->avail_start),
			    amount - pv->pv_size,
			    VM_PROT_READ|VM_PROT_WRITE,
			    PTE_CACHE);
			ps->avail_start += atop(amount - pv->pv_size);
			/*
			 * If we consumed the entire physseg, remove it.
			 */
			if (ps->avail_start == ps->avail_end) {
				for (--vm_nphysseg; i < vm_nphysseg; i++, ps++)
					ps[0] = ps[1];
			}
			memset((void *)rpv->pv_va, 0, rpv->pv_size);
			return;
		}
	}

	panic("pmap_boot_pagealloc: couldn't allocate memory");
}

vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	pv_addr_t pv;

	pmap_boot_pagealloc(size, 0, 0, &pv);

	return (pv.pv_va);
}
#endif /* PMAP_STEAL_MEMORY */
SYSCTL_SETUP(sysctl_machdep_pmap_setup, "sysctl machdep.kmpages setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0,
	    CTL_MACHDEP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_INT, "kmpages",
	    SYSCTL_DESCR("count of pages allocated to kernel memory allocators"),
	    NULL, 0, &pmap_kmpages, 0,
	    CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}
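
/*
 * Sketch (userland, not part of this file): reading the node created
 * above back with sysctlbyname(3).
 */
#if 0	/* example only */
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int kmpages;
	size_t len = sizeof(kmpages);

	if (sysctlbyname("machdep.kmpages", &kmpages, &len, NULL, 0) == 0)
		printf("%d\n", kmpages);
	return 0;
}
#endif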