Sync usage with man page.
[netbsd-mini2440.git] / sys / arch / i386 / i386 / kvm86.c
blob7b39bdadd259ab11141b4f3a66a85d9e1eea161e
1 /* $NetBSD: kvm86.c,v 1.20 2009/11/07 07:27:44 cegger Exp $ */
3 /*
4 * Copyright (c) 2002
5 * Matthias Drochner. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: kvm86.c,v 1.20 2009/11/07 07:27:44 cegger Exp $");
32 #include "opt_multiprocessor.h"
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/proc.h>
37 #include <sys/malloc.h>
38 #include <sys/mutex.h>
39 #include <sys/cpu.h>
41 #include <uvm/uvm.h>
43 #include <machine/tss.h>
44 #include <machine/gdt.h>
45 #include <machine/pte.h>
46 #include <machine/pmap.h>
47 #include <machine/kvm86.h>
/*
 * Assembler entry/exit points in kvm86call.s.
 * kvm86_call() switches into the vm86 task and returns once kvm86_ret()
 * is invoked (from the fault-handling path below) with a status code.
 */
extern int kvm86_call(struct trapframe *);
extern void kvm86_ret(struct trapframe *, int);
53 struct kvm86_data {
54 #define PGTABLE_SIZE ((1024 + 64) * 1024 / PAGE_SIZE)
55 pt_entry_t pgtbl[PGTABLE_SIZE]; /* must be aliged */
57 struct segment_descriptor sd;
59 struct i386tss tss;
60 u_long iomap[0x10000/32]; /* full size io permission map */
static void kvm86_map(struct kvm86_data *, paddr_t, uint32_t);
static void kvm86_mapbios(struct kvm86_data *);

/*
 * global VM for BIOS calls
 */
struct kvm86_data *bioscallvmd;
/* page for trampoline and stack */
void *bioscallscratchpage;
/* where this page is mapped in the vm86 */
#define BIOSCALLSCRATCHPAGE_VMVA 0x1000
/* a virtual page to map in vm86 memory temporarily */
vaddr_t bioscalltmpva;

/* GDT selector of the vm86 TSS, installed by kvm86_init() */
int kvm86_tss_sel;

/* serializes all BIOS calls (one shared scratch page and VM) */
kmutex_t kvm86_mp_lock;

#define KVM86_IOPL3 /* not strictly necessary, saves a lot of traps */
83 void
84 kvm86_init(void)
86 size_t vmdsize;
87 char *buf;
88 struct kvm86_data *vmd;
89 struct i386tss *tss;
90 int i;
91 int slot;
93 vmdsize = round_page(sizeof(struct kvm86_data)) + PAGE_SIZE;
95 buf = malloc(vmdsize, M_DEVBUF, M_NOWAIT | M_ZERO);
96 if ((u_long)buf & (PAGE_SIZE - 1)) {
97 printf("struct kvm86_data unaligned\n");
98 return;
100 /* first page is stack */
101 vmd = (struct kvm86_data *)(buf + PAGE_SIZE);
102 tss = &vmd->tss;
105 * we want to access all IO ports, so we need a full-size
106 * permission bitmap
108 memcpy(tss, &curcpu()->ci_tss, sizeof(*tss));
109 tss->tss_esp0 = (int)vmd;
110 tss->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
111 for (i = 0; i < sizeof(vmd->iomap) / 4; i++)
112 vmd->iomap[i] = 0;
113 tss->tss_iobase = ((char *)vmd->iomap - (char *)tss) << 16;
115 /* setup TSS descriptor (including our iomap) */
116 mutex_enter(&cpu_lock);
117 slot = gdt_get_slot();
118 kvm86_tss_sel = GSEL(slot, SEL_KPL);
119 setgdt(slot, tss, sizeof(*tss) + sizeof(vmd->iomap) - 1,
120 SDT_SYS386TSS, SEL_KPL, 0, 0);
121 mutex_exit(&cpu_lock);
123 /* prepare VM for BIOS calls */
124 kvm86_mapbios(vmd);
125 bioscallscratchpage = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
126 kvm86_map(vmd, vtophys((vaddr_t)bioscallscratchpage),
127 BIOSCALLSCRATCHPAGE_VMVA);
128 bioscallvmd = vmd;
129 bioscalltmpva = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_VAONLY);
130 mutex_init(&kvm86_mp_lock, MUTEX_DEFAULT, IPL_NONE);
134 * XXX pass some stuff to the assembler code
135 * XXX this should be done cleanly (in call argument to kvm86_call())
137 static void kvm86_prepare(struct kvm86_data *);
138 static void
139 kvm86_prepare(struct kvm86_data *vmd)
141 extern paddr_t vm86newptd;
142 extern struct trapframe *vm86frame;
143 extern pt_entry_t *vm86pgtableva;
145 vm86newptd = vtophys((vaddr_t)vmd) | PG_V | PG_RW | PG_U | PG_u;
146 vm86pgtableva = vmd->pgtbl;
147 vm86frame = (struct trapframe *)vmd - 1;
150 static void
151 kvm86_map(struct kvm86_data *vmd, paddr_t pa, uint32_t vmva)
154 vmd->pgtbl[vmva >> 12] = pa | PG_V | PG_RW | PG_U | PG_u;
157 static void
158 kvm86_mapbios(struct kvm86_data *vmd)
160 paddr_t pa;
162 /* map first physical page (vector table, BIOS data) */
163 kvm86_map(vmd, 0, 0);
165 /* map ISA hole */
166 for (pa = 0xa0000; pa < 0x100000; pa += PAGE_SIZE)
167 kvm86_map(vmd, pa, pa);
170 void *
171 kvm86_bios_addpage(uint32_t vmva)
173 void *mem;
175 if (bioscallvmd->pgtbl[vmva >> 12]) /* allocated? */
176 return (0);
178 mem = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
179 if ((u_long)mem & (PAGE_SIZE - 1)) {
180 printf("kvm86_bios_addpage: unaligned");
181 return (0);
183 kvm86_map(bioscallvmd, vtophys((vaddr_t)mem), vmva);
185 return (mem);
188 void
189 kvm86_bios_delpage(uint32_t vmva, void *kva)
192 bioscallvmd->pgtbl[vmva >> 12] = 0;
193 free(kva, M_DEVBUF);
196 size_t
197 kvm86_bios_read(uint32_t vmva, char *buf, size_t len)
199 size_t todo, now;
200 paddr_t vmpa;
202 todo = len;
203 while (todo > 0) {
204 now = min(todo, PAGE_SIZE - (vmva & (PAGE_SIZE - 1)));
206 if (!bioscallvmd->pgtbl[vmva >> 12])
207 break;
208 vmpa = bioscallvmd->pgtbl[vmva >> 12] & ~(PAGE_SIZE - 1);
209 pmap_kenter_pa(bioscalltmpva, vmpa, VM_PROT_READ, 0);
210 pmap_update(pmap_kernel());
212 memcpy(buf, (void *)(bioscalltmpva + (vmva & (PAGE_SIZE - 1))),
213 now);
214 buf += now;
215 todo -= now;
216 vmva += now;
218 return (len - todo);
222 kvm86_bioscall(int intno, struct trapframe *tf)
224 static const unsigned char call[] = {
225 0xfa, /* CLI */
226 0xcd, /* INTxx */
228 0xfb, /* STI */
229 0xf4 /* HLT */
231 int ret;
233 mutex_enter(&kvm86_mp_lock);
234 memcpy(bioscallscratchpage, call, sizeof(call));
235 *((unsigned char *)bioscallscratchpage + 2) = intno;
237 tf->tf_eip = BIOSCALLSCRATCHPAGE_VMVA;
238 tf->tf_cs = 0;
239 tf->tf_esp = BIOSCALLSCRATCHPAGE_VMVA + PAGE_SIZE - 2;
240 tf->tf_ss = 0;
241 tf->tf_eflags = PSL_USERSET | PSL_VM;
242 #ifdef KVM86_IOPL3
243 tf->tf_eflags |= PSL_IOPL;
244 #endif
245 tf->tf_ds = tf->tf_es = tf->tf_fs = tf->tf_gs = 0;
247 kvm86_prepare(bioscallvmd); /* XXX */
248 kpreempt_disable();
249 ret = kvm86_call(tf);
250 kpreempt_enable();
251 mutex_exit(&kvm86_mp_lock);
252 return ret;
256 kvm86_bioscall_simple(int intno, struct bioscallregs *r)
258 struct trapframe tf;
259 int res;
261 memset(&tf, 0, sizeof(struct trapframe));
262 tf.tf_eax = r->EAX;
263 tf.tf_ebx = r->EBX;
264 tf.tf_ecx = r->ECX;
265 tf.tf_edx = r->EDX;
266 tf.tf_esi = r->ESI;
267 tf.tf_edi = r->EDI;
268 tf.tf_vm86_es = r->ES;
270 res = kvm86_bioscall(intno, &tf);
272 r->EAX = tf.tf_eax;
273 r->EBX = tf.tf_ebx;
274 r->ECX = tf.tf_ecx;
275 r->EDX = tf.tf_edx;
276 r->ESI = tf.tf_esi;
277 r->EDI = tf.tf_edi;
278 r->ES = tf.tf_vm86_es;
279 r->EFLAGS = tf.tf_eflags;
281 return (res);
284 void
285 kvm86_gpfault(struct trapframe *tf)
287 unsigned char *kva, insn, trapno;
288 uint16_t *sp;
290 kva = (unsigned char *)((tf->tf_cs << 4) + tf->tf_eip);
291 insn = *kva;
292 #ifdef KVM86DEBUG
293 printf("kvm86_gpfault: cs=%x, eip=%x, insn=%x, eflags=%x\n",
294 tf->tf_cs, tf->tf_eip, insn, tf->tf_eflags);
295 #endif
297 KASSERT(tf->tf_eflags & PSL_VM);
299 switch (insn) {
300 case 0xf4: /* HLT - normal exit */
301 kvm86_ret(tf, 0);
302 break;
303 case 0xcd: /* INTxx */
304 /* fake a return stack frame and call real mode handler */
305 trapno = *(kva + 1);
306 sp = (uint16_t *)((tf->tf_ss << 4) + tf->tf_esp);
307 *(--sp) = tf->tf_eflags;
308 *(--sp) = tf->tf_cs;
309 *(--sp) = tf->tf_eip + 2;
310 tf->tf_esp -= 6;
311 tf->tf_cs = *(uint16_t *)(trapno * 4 + 2);
312 tf->tf_eip = *(uint16_t *)(trapno * 4);
313 break;
314 case 0xcf: /* IRET */
315 sp = (uint16_t *)((tf->tf_ss << 4) + tf->tf_esp);
316 tf->tf_eip = *(sp++);
317 tf->tf_cs = *(sp++);
318 tf->tf_eflags = *(sp++);
319 tf->tf_esp += 6;
320 tf->tf_eflags |= PSL_VM; /* outside of 16bit flag reg */
321 break;
322 #ifndef KVM86_IOPL3 /* XXX check VME? */
323 case 0xfa: /* CLI */
324 case 0xfb: /* STI */
325 /* XXX ignore for now */
326 tf->tf_eip++;
327 break;
328 case 0x9c: /* PUSHF */
329 sp = (uint16_t *)((tf->tf_ss << 4) + tf->tf_esp);
330 *(--sp) = tf->tf_eflags;
331 tf->tf_esp -= 2;
332 tf->tf_eip++;
333 break;
334 case 0x9d: /* POPF */
335 sp = (uint16_t *)((tf->tf_ss << 4) + tf->tf_esp);
336 tf->tf_eflags = *(sp++);
337 tf->tf_esp += 2;
338 tf->tf_eip++;
339 tf->tf_eflags |= PSL_VM; /* outside of 16bit flag reg */
340 break;
341 #endif
342 default:
343 #ifdef KVM86DEBUG
344 printf("kvm86_gpfault: unhandled\n");
345 #else
346 printf("kvm86_gpfault: cs=%x, eip=%x, insn=%x, eflags=%x\n",
347 tf->tf_cs, tf->tf_eip, insn, tf->tf_eflags);
348 #endif
350 * signal error to caller
352 kvm86_ret(tf, -1);
353 break;