/*	$NetBSD: pmap.h,v 1.142 2015/09/09 07:37:36 skrll Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>

#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#endif

#include <arm/cpufunc.h>
#include <arm/locore.h>
#include <uvm/uvm_object.h>

#ifndef _LOCORE
#ifdef ARM_MMU_EXTENDED
#define	PMAP_TLB_MAX			1
#define	PMAP_TLB_HWPAGEWALKER		1
#if PMAP_TLB_MAX > 1
#define	PMAP_NEED_TLB_SHOOTDOWN		1
#endif
#define	PMAP_TLB_FLUSH_ASID_ON_RESET	(arm_has_tlbiasid_p)
#define	PMAP_TLB_NUM_PIDS		256
#define	cpu_set_tlb_info(ci, ti)	((void)((ci)->ci_tlb_info = (ti)))
#if PMAP_TLB_MAX > 1
#define	cpu_tlb_info(ci)		((ci)->ci_tlb_info)
#else
#define	cpu_tlb_info(ci)		(&pmap_tlb0_info)
#endif
#define	pmap_md_tlb_asid_max()		(PMAP_TLB_NUM_PIDS - 1)
#include <uvm/pmap/tlb.h>
#include <uvm/pmap/pmap_tlb.h>

/*
 * If we have an EXTENDED MMU and the address space is split evenly between
 * user and kernel, we can use the TTBR0/TTBR1 to have separate L1 tables for
 * user and kernel address spaces.
 */
#if (KERNEL_BASE & 0x80000000) == 0
#error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
#endif
#endif /* ARM_MMU_EXTENDED */
/*
 * a pmap describes a process's 4GB virtual address space. this
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE. Everything below that line
 * changes when the VM context is switched. Everything above that line
 * is the same no matter which VM context is running. This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 page tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
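/*
 * Illustrative sketch (added for clarity; not part of the original header),
 * assuming KERNEL_BASE == 0x80000000: each of the 4096 L1 slots covers 1MB,
 * so the slot for a virtual address is just its top 12 bits:
 *
 *	vaddr_t va = 0x80123456;
 *	size_t l1slot = va >> L1_S_SHIFT;	// 0x801, a kernel slot
 *
 * Every slot at or above (KERNEL_BASE >> L1_S_SHIFT) points at the shared
 * kernel L2 tables regardless of which pmap is active.
 */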
/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_XLOG2	(L1_S_SHIFT)
#define	L2_BUCKET_XSIZE	(1 << L2_BUCKET_XLOG2)
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		(32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
#define	L2_SIZE		(1 << L2_LOG2)
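/*
 * Worked example (added for clarity; not in the original): L1_S_SHIFT is
 * 20, so L2_BUCKET_XLOG2 + L2_BUCKET_LOG2 = 20 + 4 = 24, giving L2_LOG2 = 8
 * and L2_SIZE = 256. A virtual address therefore decomposes as:
 *
 *	size_t l2idx    = va >> 24;		// which of 256 l2_dtables
 *	size_t l2bucket = (va >> 20) & 0xf;	// which of 16 buckets in it
 *
 * with each l2_dtable covering 16 * 1MB = 16MB of VA.
 */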
/*
 * tell MI code that the cache is virtually-indexed.
 * ARMv6 is physically-tagged but all others are virtually-tagged.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_CACHE_VIPT
#else
#define	PMAP_CACHE_VIVT
#endif
#ifndef ARM_MMU_EXTENDED
/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			uint8_t csu_cache_b[2];
			uint16_t csu_cache;
		} cs_cache_u;
		union {
			uint8_t csu_tlb_b[2];
			uint16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	uint32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
#endif /* !ARM_MMU_EXTENDED */
/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
/*
 * The pmap structure itself
 */
struct pmap {
	struct uvm_object	pm_obj;
	kmutex_t		pm_obj_lock;
#define	pm_lock pm_obj.vmobjlock
	pd_entry_t		*pm_pl1vec;
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
#ifdef ARM_MMU_EXTENDED
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_onproc;
	kcpuset_t		*pm_active;
#if PMAP_TLB_MAX > 1
	u_int			pm_shootdown_pending;
#endif
#endif
	struct pmap_asid_info	pm_pai[PMAP_TLB_MAX];
#else
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
#endif
};

struct pmap kernel_pmap;
/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;
extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelstack;
extern pv_addr_t abtstack;
extern pv_addr_t fiqstack;
extern pv_addr_t irqstack;
extern pv_addr_t undstack;
extern pv_addr_t idlestack;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

#ifdef ARM_MMU_EXTENDED
extern bool arm_has_tlbiasid_p;	/* also in <arm/locore.h> */
#endif
/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2
/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page. They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)
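/*
 * Illustrative sketch (added; not in the original): since the flags share
 * one namespace, several attributes can be cleared with a single mask,
 * e.g. when cleaning a page:
 *
 *	md->pvh_attrs &= ~(PVF_MOD|PVF_REF|PVF_DIRTY);
 *
 * where md is a struct vm_page_md * (defined near the end of this file).
 */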
/*
 * Commonly referenced structures
 */
extern int pmap_debug_level;	/* Only exists if PMAP_DEBUG */
extern int arm_poolpage_vmfreelist;

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(md)	\
	(((md)->pvh_attrs & PVF_COLORED) != 0)
#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))
u_int arm32_mmap_flags(paddr_t);
#define	ARM32_MMAP_WRITECOMBINE		0x40000000
#define	ARM32_MMAP_CACHEABLE		0x20000000
#define	pmap_mmap_flags(ppn)		arm32_mmap_flags(ppn)
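/*
 * Illustrative sketch (added; not in the original), assuming the usual
 * NetBSD convention that a device mmap entry point returns a page-frame
 * cookie; the high bits carry cache hints that arm32_mmap_flags()
 * decodes later:
 *
 *	// hypothetical d_mmap body for a write-combined framebuffer
 *	return arm_btop(fb_pa + off) | ARM32_MMAP_WRITECOMBINE;
 */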
#define	PMAP_PTE			0x10000000 /* kenter_pa */
/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int	pmap_prefetchabt_fixup(void *);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))
#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
/*
 * For the pmap, this is a more useful way to map a direct mapped page.
 * It returns either the direct-mapped VA or the VA supplied if it can't
 * be direct mapped.
 */
vaddr_t	pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
#endif

/*
 * used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
/*
 * Ending VA of direct mapped memory (usually KERNEL_VM_BASE).
 */
extern vaddr_t pmap_directlimit;
#endif
/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	KASSERT(trunc_page(va) == va);

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}
/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}
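/*
 * Illustrative sketch (added; not part of the original header):
 *
 *	vaddr_t va = ...;			// page-aligned kernel VA
 *	pt_entry_t *ptep = vtopte(va);		// its L2 PTE slot (or NULL)
 *	paddr_t pa = vtophys(va);		// 0 if not mapped
 */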
/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs.
 *
 * Unfortunately, not all CPUs have a write-through cache mode. So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the PTE sync code.
 */
extern int pmap_needs_pte_sync;

#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode. So, on these,
 * we need to do PTE syncs. If only SA-1 is configured, then evaluate
 * this at compile time.
 */
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
#define	PMAP_INCLUDE_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	1
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif
static inline void
pmap_ptesync(pt_entry_t *ptep, size_t cnt)
{
	if (PMAP_NEEDS_PTE_SYNC) {
		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
#ifdef SHEEVA_L2_CACHE
		cpu_sdcache_wb_range((vaddr_t)ptep, -1,
		    cnt * sizeof(pt_entry_t));
#endif
	}
}

#define	PDE_SYNC(pdep)			pmap_ptesync((pdep), 1)
#define	PDE_SYNC_RANGE(pdep, cnt)	pmap_ptesync((pdep), (cnt))
#define	PTE_SYNC(ptep)			pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
#define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))
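/*
 * Illustrative sketch (added; not in the original): a typical PTE update
 * writes the new entry, then forces it out to memory for CPUs whose
 * table walks do not snoop the data cache:
 *
 *	pt_entry_t opte = *ptep;
 *	l2pte_set(ptep, npte, opte);	// npte: hypothetical new PTE value
 *	PTE_SYNC(ptep);
 *
 * (l2pte_set() is defined below.)
 */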
#define	l1pte_valid_p(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
				&& ((pde) & L1_S_V6_SUPER) != 0)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
#define	l1pte_pa(pde)		((pde) & L1_C_ADDR_MASK)
#define	l1pte_index(v)		((vaddr_t)(v) >> L1_S_SHIFT)
#define	l1pte_pgindex(v)	l1pte_index((v) & L1_ADDR_BITS \
		& ~(PAGE_SIZE * PAGE_SIZE / sizeof(pt_entry_t) - 1))
static inline void
l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
}

static inline void
l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
	if (l1pte_page_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
		/*
		 * A 4KB page holds PAGE_SIZE / L2_T_SIZE coarse L2 tables,
		 * so replicate the descriptor across the consecutive L1
		 * slots, advancing the table address by L2_T_SIZE each time.
		 */
		for (size_t k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
			pde += L2_T_SIZE;
			pdep[k] = pde;
		}
	} else if (l1pte_supersection_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
		/* A supersection repeats the same descriptor in all 16 slots. */
		for (size_t k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
			pdep[k] = pde;
		}
	}
}
#define	l2pte_index(v)		((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
#define	l2pte_valid_p(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l1pte_lpage_p(pte)	(((pte) & L2_TYPE_MASK) == L2_TYPE_L)
#define	l2pte_minidata_p(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))
static inline void
l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
{
	if (l1pte_lpage_p(pte)) {
		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (L2_L_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
		/* A large page occupies L2_L_SIZE / L2_S_SIZE PTE slots. */
		for (size_t k = 0; k < L2_L_SIZE / L2_S_SIZE; k++) {
			*ptep++ = pte;
		}
	} else {
		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
		for (size_t k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
			KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x", *ptep, ptep, opte);
			*ptep++ = pte;
			pte += L2_S_SIZE;
			if (opte)
				opte += L2_S_SIZE;
		}
	}
}
static inline void
l2pte_reset(pt_entry_t *ptep)
{
	KASSERTMSG((((uintptr_t)ptep / sizeof(*ptep)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
	*ptep = 0;
	for (vsize_t k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
		ptep[k] = 0;
	}
}
/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid_p(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid_p(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))
/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
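/*
 * Worked example (added for clarity; not in the original): with
 * KERNEL_BASE at 0x80000000, L1_TABLE_SIZE 0x4000 and 4-byte L1 entries,
 * KERNEL_PD_SIZE = 0x4000 - 0x800 * 4 = 0x2000, i.e. the upper 8KB of
 * the 16KB L1 table belongs to the kernel.
 */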
void	bzero_page(vaddr_t);
void	bcopy_page(vaddr_t, vaddr_t);

#ifdef FPU_VFP
void	bzero_page_vfp(vaddr_t);
void	bcopy_page_vfp(vaddr_t, vaddr_t);
#endif
/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif /* CPU_ARM8 */
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)		/* ARM_MMU_V6 */
void	pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
#if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11mpcore(void);
#endif
#if ARM_MMU_V7 == 1
void	pmap_pte_init_armv7(void);
#endif /* ARM_MMU_V7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */
extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_wc_mode;
extern pt_entry_t		pte_l2_l_wc_mode;
extern pt_entry_t		pte_l2_s_wc_mode;

extern pt_entry_t		pte_l1_s_prot_u;
extern pt_entry_t		pte_l1_s_prot_w;
extern pt_entry_t		pte_l1_s_prot_ro;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_ro;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_u;
extern pt_entry_t		pte_l2_l_prot_w;
extern pt_entry_t		pte_l2_l_prot_ro;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l1_ss_proto;
extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);
#endif /* !_LOCORE */

/*****************************************************************************/

#define	KERNEL_PID		0	/* The kernel uses ASID 0 */

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel pmap uses domain #0 */
#ifdef ARM_MMU_EXTENDED
#define	PMAP_DOMAIN_USER	1	/* User pmaps use domain #1 */
#endif
/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_generic	(0)
#define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_xscale	(0)
#define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
#define	L1_S_CACHE_MASK_armv6n	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
#define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)

#define	L2_L_PROT_U_generic	(L2_AP(AP_U))
#define	L2_L_PROT_W_generic	(L2_AP(AP_W))
#define	L2_L_PROT_RO_generic	(0)
#define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_RO_xscale	(0)
#define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
#define	L2_L_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_RO_generic	(0)
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_xscale	(0)
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
#define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
#ifdef ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
#else
#define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
#endif
#define	L2_S_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_armv6	(L1_TYPE_S)
#define	L1_S_PROTO_armv7	(L1_TYPE_S)

#define	L1_SS_PROTO_generic	0
#define	L1_SS_PROTO_xscale	0
#define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
#define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_armv6	(L1_TYPE_C)
#define	L1_C_PROTO_armv7	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)
#ifdef ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_PROTO_armv6c	(L2_TYPE_XS)	/* XP=0, extended small page */
#else
#define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv6n	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv7	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv7	(L2_TYPE_S)
#endif
/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_U		pte_l1_s_prot_u
#define	L1_S_PROT_W		pte_l1_s_prot_w
#define	L1_S_PROT_RO		pte_l1_s_prot_ro
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_RO		pte_l2_s_prot_ro
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L2_L_PROT_U		pte_l2_l_prot_u
#define	L2_L_PROT_W		pte_l2_l_prot_w
#define	L2_L_PROT_RO		pte_l2_l_prot_ro
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_SS_PROTO		pte_l1_ss_proto
#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_generic
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6N != 0
#define	L1_S_PROT_U		L1_S_PROT_U_armv6
#define	L1_S_PROT_W		L1_S_PROT_W_armv6
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6

#define	L2_S_PROT_U		L2_S_PROT_U_armv6n
#define	L2_S_PROT_W		L2_S_PROT_W_armv6n
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n

#define	L2_L_PROT_U		L2_L_PROT_U_armv6n
#define	L2_L_PROT_W		L2_L_PROT_W_armv6n
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6n
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6n
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_armv6
#define	L1_C_PROTO		L1_C_PROTO_armv6
#define	L2_S_PROTO		L2_S_PROTO_armv6n

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6C != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_SS_PROTO		L1_SS_PROTO_xscale
#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_U		L1_S_PROT_U_armv7
#define	L1_S_PROT_W		L1_S_PROT_W_armv7
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7

#define	L2_S_PROT_U		L2_S_PROT_U_armv7
#define	L2_S_PROT_W		L2_S_PROT_W_armv7
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7

#define	L2_L_PROT_U		L2_L_PROT_U_armv7
#define	L2_L_PROT_W		L2_L_PROT_W_armv7
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv7
#define	L1_S_PROTO		L1_S_PROTO_armv7
#define	L1_C_PROTO		L1_C_PROTO_armv7
#define	L2_S_PROTO		L2_S_PROTO_armv7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */
/*
 * Macros to set and query the write permission on page descriptors.
 */
#define	l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
#define	l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
#define	l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define	l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)

#define	l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				 ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))
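/*
 * Illustrative sketch (added; not in the original): downgrading a mapping
 * to read-only and checking the result:
 *
 *	pt_entry_t npte = l2pte_set_readonly(*ptep);
 *	l2pte_set(ptep, npte, *ptep);
 *	PTE_SYNC(ptep);
 *	KASSERT(!l2pte_writable_p(npte));
 */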
/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
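/*
 * Illustrative sketch (added; not in the original): the protection bits
 * for a user, read-only small-page mapping:
 *
 *	pt_entry_t prot = L2_S_PROT(PTE_USER, VM_PROT_READ);
 *	// == L2_S_PROT_U | L2_S_PROT_RO for the configured MMU class
 */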
/*
 * Macros to test if a mapping is mappable with an L1 SuperSection,
 * L1 Section, or an L2 Large Page mapping.
 */
#define	L1_SS_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)

#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
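/*
 * Illustrative sketch (added; not in the original): a chunk mapper such
 * as pmap_map_chunk() can pick the largest mapping that fits:
 *
 *	if (L1_SS_MAPPABLE_P(va, pa, resid))
 *		... use a 16MB supersection ...
 *	else if (L1_S_MAPPABLE_P(va, pa, resid))
 *		... use a 1MB section ...
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))
 *		... use a 64KB large page ...
 *	else
 *		... fall back to 4KB small pages ...
 */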
#ifndef _LOCORE
/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
extern paddr_t physical_start, physical_end;
#ifdef PMAP_NEED_ALLOC_POOLPAGE
struct vm_page *arm_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
#endif
#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
vaddr_t	pmap_map_poolpage(paddr_t);
paddr_t	pmap_unmap_poolpage(vaddr_t);
#define	PMAP_MAP_POOLPAGE(pa)	pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)	pmap_unmap_poolpage(va)
#endif
/*
 * pmap-specific data store in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
};

/*
 * Set the default color of each page.
 */
#ifdef PMAP_CACHE_VIPT
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
#else
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = 0
#endif

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)
#endif /* !_LOCORE */

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */