/* lib/libkvm/kvm_hp300.c */
/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)kvm_hp300.c	8.1 (Berkeley) 6/4/93";
#endif /* LIBC_SCCS and not lint */
/*
 * Hp300 machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */
#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>

#include <limits.h>
#include <db.h>
#include <stdlib.h>		/* for free() */

#include "kvm_private.h"
#if defined(hp300)
#include <hp300/hp300/pte.h>
#endif

#if defined(luna68k)
#include <luna68k/luna68k/pte.h>
#endif
#ifndef btop
#define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* XXX */
#define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* XXX */
#endif
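
/*
 * A worked sketch of the conversion, assuming the hp300's 4KB pages
 * (PGSHIFT == 12): btop(0x12345) yields page number 0x12, and
 * ptob(0x12) yields byte address (caddr_t)0x12000.
 */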
struct vmstate {
	u_long lowram;
	int mmutype;
	struct ste *Sysseg;
};
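
/*
 * The vmstate above caches what the dead-kernel translation code needs:
 * the physical address where RAM begins (lowram), the MMU flavour
 * (mmutype; on this port -2 appears to denote the 68040's three-level
 * tables), and the kernel segment table pointer (Sysseg).
 */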
#define KREAD(kd, addr, p)\
	(kvm_read(kd, addr, (char *)(p), sizeof(*(p))) != sizeof(*(p)))
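
/*
 * KREAD evaluates to nonzero on failure, so callers write
 * "if (KREAD(kd, addr, &obj)) bail;".  A minimal sketch of the
 * expansion for a hypothetical u_long variable val:
 *
 *	if (kvm_read(kd, addr, (char *)&val, sizeof(val)) != sizeof(val))
 *		... handle the failed or short read ...
 */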
void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0)
		free(kd->vmst);
}
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	struct vmstate *vm;
	struct nlist nlist[4];

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == 0)
		return (-1);
	kd->vmst = vm;

	nlist[0].n_name = "_lowram";
	nlist[1].n_name = "_mmutype";
	nlist[2].n_name = "_Sysseg";
	nlist[3].n_name = 0;

	if (kvm_nlist(kd, nlist) != 0) {
		_kvm_err(kd, kd->program, "bad namelist");
		return (-1);
	}
	vm->Sysseg = 0;
	if (KREAD(kd, (u_long)nlist[0].n_value, &vm->lowram)) {
		_kvm_err(kd, kd->program, "cannot read lowram");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[1].n_value, &vm->mmutype)) {
		_kvm_err(kd, kd->program, "cannot read mmutype");
		return (-1);
	}
	if (KREAD(kd, (u_long)nlist[2].n_value, &vm->Sysseg)) {
		_kvm_err(kd, kd->program, "cannot read segment table");
		return (-1);
	}
	return (0);
}
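
/*
 * Note that the leading underscores in the namelist above follow the
 * a.out convention of prefixing C symbols with "_"; kvm_nlist()
 * returns 0 only when every requested symbol was resolved.
 */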
static int
_kvm_vatop(kd, sta, va, pa)
	kvm_t *kd;
	struct ste *sta;
	u_long va;
	u_long *pa;
{
	register struct vmstate *vm;
	register u_long lowram;
	register u_long addr;
	int p, ste, pte;
	int offset;

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return((off_t)0);
	}
	vm = kd->vmst;
	offset = va & PGOFSET;
	/*
	 * If we are initializing (kernel segment table pointer not yet set)
	 * then return pa == va to avoid infinite recursion.
	 */
	if (vm->Sysseg == 0) {
		*pa = va;
		return (NBPG - offset);
	}
	lowram = vm->lowram;
	if (vm->mmutype == -2) {
		struct ste *sta2;

		addr = (u_long)&sta[va >> SG4_SHIFT1];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
				 ste);
			return((off_t)0);
		}
		sta2 = (struct ste *)(ste & SG4_ADDR1);
		addr = (u_long)&sta2[(va & SG4_MASK2) >> SG4_SHIFT2];
		/*
		 * Address from level 1 STE is a physical address,
		 * so don't use kvm_read.
		 */
		if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
		    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
				 ste);
			return((off_t)0);
		}
		sta2 = (struct ste *)(ste & SG4_ADDR2);
		addr = (u_long)&sta2[(va & SG4_MASK3) >> SG4_SHIFT3];
	} else {
		addr = (u_long)&sta[va >> SEGSHIFT];
		/*
		 * Can't use KREAD to read kernel segment table entries.
		 * Fortunately it is 1-to-1 mapped so we don't have to.
		 */
		if (sta == vm->Sysseg) {
			if (lseek(kd->pmfd, (off_t)addr, 0) == -1 ||
			    read(kd->pmfd, (char *)&ste, sizeof(ste)) < 0)
				goto invalid;
		} else if (KREAD(kd, addr, &ste))
			goto invalid;
		if ((ste & SG_V) == 0) {
			_kvm_err(kd, 0, "invalid segment (%x)", ste);
			return((off_t)0);
		}
		p = btop(va & SG_PMASK);
		addr = (ste & SG_FRAME) + (p * sizeof(struct pte));
	}
	/*
	 * Address from STE is a physical address so don't use kvm_read.
	 */
	if (lseek(kd->pmfd, (off_t)(addr - lowram), 0) == -1 ||
	    read(kd->pmfd, (char *)&pte, sizeof(pte)) < 0)
		goto invalid;
	addr = pte & PG_FRAME;
	if (pte == PG_NV) {
		_kvm_err(kd, 0, "page not valid");
		return (0);
	}
	*pa = addr - lowram + offset;

	return (NBPG - offset);
invalid:
	_kvm_err(kd, 0, "invalid address (%x)", va);
	return (0);
}
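
/*
 * A worked sketch of the mmutype == -2 (68040) walk above, assuming
 * the usual hp300 SG4_* constants (SG4_SHIFT1 == 25, SG4_SHIFT2 == 18,
 * SG4_SHIFT3 == 12): the 32-bit va splits into a 7-bit level 1 index
 * (va >> 25), a 7-bit level 2 index ((va & SG4_MASK2) >> 18) and a
 * 6-bit level 3 index ((va & SG4_MASK3) >> 12), each selecting a
 * descriptor whose SG_V bit must be set before the walk descends.
 */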
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	return (_kvm_vatop(kd, kd->vmst->Sysseg, va, pa));
}
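
/*
 * Kernel virtual addresses resolve against the kernel segment table
 * whose address was captured by _kvm_initvtop() above.
 */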
/*
 * Translate a user virtual address to a physical address.
 */
int
_kvm_uvatop(kd, p, va, pa)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *pa;
{
	register struct vmspace *vms = p->p_vmspace;
	int kva;

	/*
	 * If this is a live kernel we just look it up in the kernel
	 * virtually allocated flat 4mb page table (i.e. let the kernel
	 * do the table walk).  In this way, we avoid needing to know
	 * the MMU type.
	 */
	if (ISALIVE(kd)) {
		struct pte *ptab;
		int pte, offset;

		kva = (int)&vms->vm_pmap.pm_ptab;
		if (KREAD(kd, kva, &ptab)) {
			_kvm_err(kd, 0, "invalid address (%x)", va);
			return (0);
		}
		kva = (int)&ptab[btop(va)];
		if (KREAD(kd, kva, &pte) || (pte & PG_V) == 0) {
			_kvm_err(kd, 0, "invalid address (%x)", va);
			return (0);
		}
		offset = va & PGOFSET;
		*pa = (pte & PG_FRAME) | offset;
		return (NBPG - offset);
	}
	/*
	 * Otherwise, we just walk the table ourself.
	 */
	kva = (int)&vms->vm_pmap.pm_stab;
	if (KREAD(kd, kva, &kva)) {
		_kvm_err(kd, 0, "invalid address (%x)", va);
		return (0);
	}
	return (_kvm_vatop(kd, (struct ste *)kva, va, pa));
}
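
/*
 * Usage sketch (not part of this file): the translation routines above
 * are exercised indirectly whenever a consumer reads kernel memory from
 * a crash dump through the kvm(3) interface.  A minimal, hypothetical
 * consumer might look like (kernel and core paths are assumptions):
 *
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <nlist.h>
 *	#include <stdio.h>
 *
 *	int
 *	main()
 *	{
 *		kvm_t *kd;
 *		struct nlist nl[2];
 *		long hz;
 *
 *		nl[0].n_name = "_hz";
 *		nl[1].n_name = 0;
 *		kd = kvm_open("/netbsd", "/var/crash/netbsd.0.core",
 *		    NULL, O_RDONLY, "example");
 *		if (kd == NULL || kvm_nlist(kd, nl) != 0)
 *			return (1);
 *		if (kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) !=
 *		    sizeof(hz))
 *			return (1);
 *		printf("hz = %ld\n", hz);
 *		return (kvm_close(kd));
 *	}
 *
 * On a dead kernel, kvm_read() calls _kvm_kvatop() to turn each kernel
 * virtual address into an offset before reading the core file.
 */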