sys/arch/vax/include/pmap.h

/*	$NetBSD: pmap.h,v 1.76 2008/12/09 20:45:46 pooka Exp $	*/

/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

#ifndef PMAP_H
#define PMAP_H

#include <sys/simplelock.h>

#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>

/*
 * Some constants to make life easier.
 */
#define	LTOHPS		(PGSHIFT - VAX_PGSHIFT)
#define	LTOHPN		(1 << LTOHPS)
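
/*
 * LTOHPS/LTOHPN convert between the machine-independent ("logical") page
 * size and the 512-byte VAX hardware page.  Assuming the usual values
 * (PGSHIFT == 12 for 4 KB logical pages, VAX_PGSHIFT == 9 for 512-byte
 * hardware pages), one logical page corresponds to LTOHPN == 8 hardware
 * pages.
 */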

/*
 * Pmap structure
 *  pm_stack holds lowest allocated memory for the process stack.
 */

struct pmap {
	struct pte	*pm_p1ap;	/* Base of alloced p1 pte space */
	int		 pm_count;	/* reference count */
	struct pcb	*pm_pcbs;	/* PCBs using this pmap */
	struct pte	*pm_p0br;	/* page 0 base register */
	long		 pm_p0lr;	/* page 0 length register */
	struct pte	*pm_p1br;	/* page 1 base register */
	long		 pm_p1lr;	/* page 1 length register */
	struct simplelock pm_lock;	/* Lock entry in MP environment */
	struct pmap_statistics pm_stats;	/* Some statistics */
};
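
/*
 * The pm_p0br/pm_p0lr and pm_p1br/pm_p1lr fields above shadow the VAX
 * P0BR/P0LR and P1BR/P1LR memory management registers: P0 space
 * (0x00000000-0x3fffffff) grows upward, P1 space (0x40000000-0x7fffffff)
 * grows downward toward the stack, and system space starts at KERNBASE
 * (0x80000000).  The values are loaded into the hardware through the PCBs
 * on pm_pcbs when a process using this pmap runs.
 */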

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	vaddr_t		 pv_vaddr;	/* address for this physical page */
	struct pmap	*pv_pmap;	/* pmap this entry belongs to */
	int		 pv_attr;	/* write/modified bits */
};

extern	struct pv_entry *pv_table;
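
/*
 * pv_table has one head per logical page of managed physical memory and is
 * indexed by page frame number, as the inline functions below do:
 * pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT).  pv_attr caches the summary
 * bits for the page; PG_V is used as the referenced bit and PG_M as the
 * modified bit.
 */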

/* Mapping macros used when allocating SPT */
#define	MAPVIRT(ptr, count)					\
	ptr = virtual_avail;					\
	virtual_avail += (count) * VAX_NBPG;

#define	MAPPHYS(ptr, count, perm)				\
	ptr = avail_start + KERNBASE;				\
	avail_start += (count) * VAX_NBPG;
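
/*
 * MAPVIRT/MAPPHYS are bootstrap-time allocators: they just advance the
 * virtual_avail and avail_start cursors before UVM is running.  Counts are
 * in 512-byte VAX pages (VAX_NBPG), and this version of MAPPHYS ignores its
 * perm argument.  A hypothetical use (names here are illustrative only;
 * see pmap_bootstrap() for the real callers) might look like:
 *
 *	vaddr_t scratch;
 *	MAPPHYS(scratch, LTOHPN, VM_PROT_READ|VM_PROT_WRITE);
 */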

/*
 * Real nice (fast) routines to get the virtual address of a physical page
 * (and vice versa).
 */
#define	PMAP_VTOPHYS(va)	((va) & ~KERNBASE)
#define	PMAP_MAP_POOLPAGE(pa)	((pa) | KERNBASE)
#define	PMAP_UNMAP_POOLPAGE(va)	((va) & ~KERNBASE)

#define	PMAP_STEAL_MEMORY
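
/*
 * The pool-page macros above work because physical memory is mapped 1:1
 * into system space at KERNBASE (0x80000000), so mapping or unmapping a
 * pool page is just setting or clearing the KERNBASE bit in its address.
 * PMAP_STEAL_MEMORY tells UVM that this pmap provides pmap_steal_memory()
 * for early boot-time allocations.
 */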

/*
 * This is the by far most used pmap routine. Make it inline.
 */
__inline static bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	int	*pte, sva;

	if (va & KERNBASE) {
		paddr_t pa;

		pa = kvtophys(va); /* Is 0 if not mapped */
		if (pap)
			*pap = pa;
		if (pa)
			return (true);
		return (false);
	}

	sva = PG_PFNUM(va);
	if (va < 0x40000000) {
		if (sva >= (pmap->pm_p0lr & ~AST_MASK))
			goto fail;
		pte = (int *)pmap->pm_p0br;
	} else {
		if (sva < pmap->pm_p1lr)
			goto fail;
		pte = (int *)pmap->pm_p1br;
	}
	/*
	 * Since the PTE tables are sparsely allocated, make sure the page
	 * table page actually exists before dereferencing the pte itself.
	 */
	if (kvtopte(&pte[sva])->pg_v && (pte[sva] & PG_FRAME)) {
		if (pap)
			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
		return (true);
	}
 fail:
	if (pap)
		*pap = 0;
	return (false);
}
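
/*
 * Sketch of a typical pmap_extract() call, following the standard pmap(9)
 * contract (the surrounding names are illustrative):
 *
 *	paddr_t pa;
 *
 *	if (!pmap_extract(pmap, va, &pa))
 *		return EFAULT;		(no valid mapping for va)
 */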

bool pmap_clear_modify_long(struct pv_entry *);
bool pmap_clear_reference_long(struct pv_entry *);
bool pmap_is_modified_long(struct pv_entry *);
void pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
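
/*
 * The inline wrappers below handle the common case straight from the
 * cached pv_attr bits; they only fall back to the out-of-line *_long()
 * versions prototyped above (implemented in pmap.c) when the cached bits
 * are not enough, i.e. when the page still has active mappings whose PTEs
 * must be inspected or updated.
 */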

__inline static bool
pmap_is_referenced(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_V) != 0;

	return rv;
}

__inline static bool
pmap_clear_reference(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_V) != 0;

	pv->pv_attr &= ~PG_V;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_reference_long(pv);
	return rv;
}

__inline static bool
pmap_clear_modify(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	bool rv = (pv->pv_attr & PG_M) != 0;

	pv->pv_attr &= ~PG_M;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_modify_long(pv);
	return rv;
}

__inline static bool
pmap_is_modified(struct vm_page *pg)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
	if (pv->pv_attr & PG_M)
		return 1;
	else
		return pmap_is_modified_long(pv);
}

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);

	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		pmap_page_protect_long(pv, prot);
}

__inline static void
pmap_protect(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
	if (pmap->pm_p0lr != 0 || pmap->pm_p1lr != 0x200000 ||
	    (start & KERNBASE) != 0)
		pmap_protect_long(pmap, start, end, prot);
}
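
/*
 * The test in pmap_protect() above is a fast path: pm_p0lr == 0 and
 * pm_p1lr == 0x200000 describe a completely empty user address space
 * (0x200000 is the number of 512-byte pages in the 1 GB P1 region, and
 * P1 grows downward), so unless the range lies in system space there is
 * nothing to change and pmap_protect_long() is skipped.
 */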

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/* Routines that are best to define as macros */
#define	pmap_phys_address(phys)		((u_int)(phys) << PGSHIFT)
#define	pmap_copy(a,b,c,d,e)		/* Don't do anything */
#define	pmap_update(pmap)		/* nothing (yet) */
#define	pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_reference(pmap)		(pmap)->pm_count++
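
/*
 * pmap_remove() is simply pmap_protect() with an access type of 0:
 * revoking all permissions removes the range.  ("slut" is Swedish for
 * "end".)
 */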

/* These can be done as efficient inline macros */
#define	pmap_copy_page(src, dst)				\
	__asm("addl3 $0x80000000,%0,%%r0;"			\
	    "addl3 $0x80000000,%1,%%r1;"			\
	    "movc3 $4096,(%%r0),(%%r1)"				\
	    :: "r"(src), "r"(dst)				\
	    : "r0","r1","r2","r3","r4","r5");

#define	pmap_zero_page(phys)					\
	__asm("addl3 $0x80000000,%0,%%r0;"			\
	    "movc5 $0,(%%r0),$0,$4096,(%%r0)"			\
	    :: "r"(phys)					\
	    : "r0","r1","r2","r3","r4","r5");

/* Prototypes */
void	pmap_bootstrap(void);
vaddr_t	pmap_map(vaddr_t, vaddr_t, vaddr_t, int);

#endif /* PMAP_H */