/*-
 * SPDX-License-Identifier: BSD-3-Clause AND BSD-4-Clause
 *
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 *
 * Adapted for Freescale's e500 core CPUs.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
 */
64 #ifndef _MACHINE_PMAP_H_
65 #define _MACHINE_PMAP_H_
67 #include <sys/queue.h>
69 #include <sys/_cpuset.h>
70 #include <sys/_lock.h>
71 #include <sys/_mutex.h>
72 #include <machine/sr.h>
73 #include <machine/pte.h>
74 #include <machine/slb.h>
75 #include <machine/tlb.h>
76 #include <machine/vmparam.h>
78 #include <vm/_vm_radix.h>
/*
 * The radix page table structure is described by levels 1-4.
 * See Fig 33. on p. 1002 of Power ISA v3.0B
 *
 * Page directories and tables must be size aligned.
 */

/* Root page directory - 64k -- each entry covers 512GB */
typedef uint64_t pml1_entry_t;
/* l2 page directory - 4k -- each entry covers 1GB */
typedef uint64_t pml2_entry_t;
/* l3 page directory - 4k -- each entry covers 2MB */
typedef uint64_t pml3_entry_t;
/* l4 page directory - 256B/4k -- each entry covers 64k/4k */
typedef uint64_t pml4_entry_t;

typedef uint64_t pt_entry_t;

/* Opaque handle to a physical map; struct pmap is defined further below. */
typedef struct pmap *pmap_t;
/* pmap_enter() flag -- NOTE(review): presumably "pmap already locked"; confirm vs. callers. */
#define	PMAP_ENTER_QUICK_LOCKED	0x10000000
106 #endif /* !defined(NPMAPS) */
111 LIST_ENTRY(pvo_entry
) pvo_vlink
; /* Link to common virt page */
112 #ifndef __powerpc64__
113 LIST_ENTRY(pvo_entry
) pvo_olink
; /* Link to overflow entry */
116 RB_ENTRY(pvo_entry
) pvo_plink
; /* Link to pmap entries */
117 SLIST_ENTRY(pvo_entry
) pvo_dlink
; /* Link to delete enty */
120 #ifndef __powerpc64__
129 pmap_t pvo_pmap
; /* Owning pmap */
130 vm_offset_t pvo_vaddr
; /* VA of entry */
131 uint64_t pvo_vpn
; /* Virtual page number */
/* Head types for the lists/trees that link struct pvo_entry nodes. */
LIST_HEAD(pvo_head, pvo_entry);
SLIST_HEAD(pvo_dlist, pvo_entry);
RB_HEAD(pvo_tree, pvo_entry);

/* Orders pvo_entry nodes for the red-black tree (used by RB_PROTOTYPE below). */
int	pvo_vaddr_compare(struct pvo_entry *, struct pvo_entry *);
RB_PROTOTYPE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare);
/* Used by 32-bit PMAP */
#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
/* Used by 64-bit PMAP */
#define	PVO_HID			0x008UL		/* PVO entry in alternate hash*/

#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
						   bootstrap */
#define	PVO_DEAD		0x100UL		/* waiting to be deleted */
#define	PVO_LARGE		0x200UL		/* large page */

/* Accessors for the fields packed into pvo_vaddr / pvo_vpn. */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
161 struct pmap_statistics pm_stats
;
167 struct slbtnode
*pm_slb_tree_root
;
171 register_t pm_sr
[16];
174 struct pmap
*pmap_phys
;
175 struct pvo_tree pmap_pvo
;
180 pml1_entry_t
*pm_pml1
; /* KVA of root page directory */
181 struct vm_radix pm_radix
; /* spare page table pages */
182 TAILQ_HEAD(,pv_chunk
) pm_pvchunk
; /* list of mappings in pmap */
183 uint64_t pm_pid
; /* PIDR value */
188 /* TID to identify this pmap entries in TLB */
189 tlbtid_t pm_tid
[MAXCPU
];
193 * Page table directory,
194 * array of pointers to page directories.
199 * Page table directory,
200 * array of pointers to page tables.
204 /* List of allocated ptbl bufs (ptbl kva regions). */
205 TAILQ_HEAD(, ptbl_buf
) pm_ptbl_list
;
208 } __aligned(CACHE_LINE_SIZE
);
212 * pv_entries are allocated in chunks per-process. This avoids the
213 * need to track per-pmap assignments.
216 #define _NPCM howmany(_NPCPV, 64)
218 #define PV_CHUNK_HEADER \
220 TAILQ_ENTRY(pv_chunk) pc_list; \
221 uint64_t pc_map[_NPCM]; /* bitmap; 1 = free */ \
222 TAILQ_ENTRY(pv_chunk) pc_lru;
227 TAILQ_ENTRY(pv_entry
) pv_link
;
/* Opaque handle to a pv_entry (defined just above). */
typedef struct pv_entry *pv_entry_t;
231 struct pv_chunk_header
{
237 struct pv_entry pc_pventry
[_NPCPV
];
243 volatile int32_t mdpg_attrs
;
244 vm_memattr_t mdpg_cache_attrs
;
245 struct pvo_head mdpg_pvoh
;
246 int pv_gen
; /* (p) */
252 TAILQ_HEAD(, pv_entry
) pv_list
; /* (p) */
256 #define pmap_page_get_memattr(m) ((m)->md.mdpg_cache_attrs)
258 #define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
262 * Return the VSID corresponding to a given virtual address.
263 * If no VSID is currently defined, it will allocate one, and add
264 * it to a free slot if available.
266 * NB: The PMAP MUST be locked already.
268 uint64_t va_to_vsid(pmap_t pm
, vm_offset_t va
);
270 /* Lock-free, non-allocating lookup routines */
271 uint64_t kernel_va_to_slbv(vm_offset_t va
);
272 struct slb
*user_va_to_slb_entry(pmap_t pm
, vm_offset_t va
);
274 uint64_t allocate_user_vsid(pmap_t pm
, uint64_t esid
, int large
);
275 void free_vsid(pmap_t pm
, uint64_t esid
, int large
);
276 void slb_insert_user(pmap_t pm
, struct slb
*slb
);
277 void slb_insert_kernel(uint64_t slbe
, uint64_t slbv
);
279 struct slbtnode
*slb_alloc_tree(void);
280 void slb_free_tree(pmap_t pm
);
281 struct slb
**slb_alloc_user_cache(void);
282 void slb_free_user_cache(struct slb
**);
284 extern struct pmap kernel_pmap_store
;
#define	kernel_pmap	(&kernel_pmap_store)

/* Per-pmap mutex helpers; the kernel pmap gets its own lock name. */
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, \
				    (pmap == kernel_pmap) ? "kernelpmap" : \
				    "pmap", NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)

#define	pmap_page_is_write_mapped(m)	(((m)->a.flags & PGA_WRITEABLE) != 0)

/* Intentionally a no-op on this architecture. */
#define	pmap_vm_page_alloc_check(m)
305 void pmap_bootstrap(vm_offset_t
, vm_offset_t
);
306 void pmap_kenter(vm_offset_t va
, vm_paddr_t pa
);
307 void pmap_kenter_attr(vm_offset_t va
, vm_paddr_t pa
, vm_memattr_t
);
308 void pmap_kremove(vm_offset_t
);
309 void *pmap_mapdev(vm_paddr_t
, vm_size_t
);
310 void *pmap_mapdev_attr(vm_paddr_t
, vm_size_t
, vm_memattr_t
);
311 void pmap_unmapdev(void *, vm_size_t
);
312 void pmap_page_set_memattr(vm_page_t
, vm_memattr_t
);
313 int pmap_change_attr(vm_offset_t
, vm_size_t
, vm_memattr_t
);
314 int pmap_map_user_ptr(pmap_t pm
, volatile const void *uaddr
,
315 void **kaddr
, size_t ulen
, size_t *klen
);
316 int pmap_decode_kernel_ptr(vm_offset_t addr
, int *is_user
,
317 vm_offset_t
*decoded_addr
);
318 void pmap_deactivate(struct thread
*);
319 vm_paddr_t
pmap_kextract(vm_offset_t
);
320 int pmap_dev_direct_mapped(vm_paddr_t
, vm_size_t
);
321 bool pmap_mmu_install(char *name
, int prio
);
322 void pmap_mmu_init(void);
323 const char *pmap_mmu_name(void);
324 bool pmap_ps_enabled(pmap_t pmap
);
325 int pmap_nofault(pmap_t pmap
, vm_offset_t va
, vm_prot_t flags
);
326 bool pmap_page_is_mapped(vm_page_t m
);
327 #define pmap_map_delete(pmap, sva, eva) pmap_remove(pmap, sva, eva)
329 void pmap_page_array_startup(long count
);
331 #define vtophys(va) pmap_kextract((vm_offset_t)(va))
333 extern vm_offset_t virtual_avail
;
334 extern vm_offset_t virtual_end
;
335 extern caddr_t crashdumpmap
;
337 extern vm_offset_t msgbuf_phys
;
339 extern int pmap_bootstrapped
;
340 extern int radix_mmu
;
341 extern int superpages_enabled
;
344 void pmap_early_io_map_init(void);
346 vm_offset_t
pmap_early_io_map(vm_paddr_t pa
, vm_size_t size
);
347 void pmap_early_io_unmap(vm_offset_t va
, vm_size_t size
);
348 void pmap_track_page(pmap_t pmap
, vm_offset_t va
);
349 void pmap_page_print_mappings(vm_page_t m
);
350 void pmap_tlbie_all(void);
353 pmap_vmspace_copy(pmap_t dst_pmap __unused
, pmap_t src_pmap __unused
)
361 #endif /* !_MACHINE_PMAP_H_ */