1 /* $NetBSD: kvm86.c,v 1.20 2009/11/07 07:27:44 cegger Exp $ */
5 * Matthias Drochner. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: kvm86.c,v 1.20 2009/11/07 07:27:44 cegger Exp $");
32 #include "opt_multiprocessor.h"
34 #include <sys/param.h>
35 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #include <sys/mutex.h>
43 #include <machine/tss.h>
44 #include <machine/gdt.h>
45 #include <machine/pte.h>
46 #include <machine/pmap.h>
47 #include <machine/kvm86.h>
/* assembler functions in kvm86call.s */
extern int kvm86_call(struct trapframe *);
extern void kvm86_ret(struct trapframe *, int);
54 #define PGTABLE_SIZE ((1024 + 64) * 1024 / PAGE_SIZE)
55 pt_entry_t pgtbl
[PGTABLE_SIZE
]; /* must be aliged */
57 struct segment_descriptor sd
;
60 u_long iomap
[0x10000/32]; /* full size io permission map */
63 static void kvm86_map(struct kvm86_data
*, paddr_t
, uint32_t);
64 static void kvm86_mapbios(struct kvm86_data
*);
67 * global VM for BIOS calls
69 struct kvm86_data
*bioscallvmd
;
70 /* page for trampoline and stack */
71 void *bioscallscratchpage
;
72 /* where this page is mapped in the vm86 */
73 #define BIOSCALLSCRATCHPAGE_VMVA 0x1000
74 /* a virtual page to map in vm86 memory temporarily */
75 vaddr_t bioscalltmpva
;
79 kmutex_t kvm86_mp_lock
;
81 #define KVM86_IOPL3 /* not strictly necessary, saves a lot of traps */
88 struct kvm86_data
*vmd
;
93 vmdsize
= round_page(sizeof(struct kvm86_data
)) + PAGE_SIZE
;
95 buf
= malloc(vmdsize
, M_DEVBUF
, M_NOWAIT
| M_ZERO
);
96 if ((u_long
)buf
& (PAGE_SIZE
- 1)) {
97 printf("struct kvm86_data unaligned\n");
100 /* first page is stack */
101 vmd
= (struct kvm86_data
*)(buf
+ PAGE_SIZE
);
105 * we want to access all IO ports, so we need a full-size
108 memcpy(tss
, &curcpu()->ci_tss
, sizeof(*tss
));
109 tss
->tss_esp0
= (int)vmd
;
110 tss
->tss_ss0
= GSEL(GDATA_SEL
, SEL_KPL
);
111 for (i
= 0; i
< sizeof(vmd
->iomap
) / 4; i
++)
113 tss
->tss_iobase
= ((char *)vmd
->iomap
- (char *)tss
) << 16;
115 /* setup TSS descriptor (including our iomap) */
116 mutex_enter(&cpu_lock
);
117 slot
= gdt_get_slot();
118 kvm86_tss_sel
= GSEL(slot
, SEL_KPL
);
119 setgdt(slot
, tss
, sizeof(*tss
) + sizeof(vmd
->iomap
) - 1,
120 SDT_SYS386TSS
, SEL_KPL
, 0, 0);
121 mutex_exit(&cpu_lock
);
123 /* prepare VM for BIOS calls */
125 bioscallscratchpage
= malloc(PAGE_SIZE
, M_DEVBUF
, M_NOWAIT
);
126 kvm86_map(vmd
, vtophys((vaddr_t
)bioscallscratchpage
),
127 BIOSCALLSCRATCHPAGE_VMVA
);
129 bioscalltmpva
= uvm_km_alloc(kernel_map
, PAGE_SIZE
, 0, UVM_KMF_VAONLY
);
130 mutex_init(&kvm86_mp_lock
, MUTEX_DEFAULT
, IPL_NONE
);
134 * XXX pass some stuff to the assembler code
135 * XXX this should be done cleanly (in call argument to kvm86_call())
137 static void kvm86_prepare(struct kvm86_data
*);
139 kvm86_prepare(struct kvm86_data
*vmd
)
141 extern paddr_t vm86newptd
;
142 extern struct trapframe
*vm86frame
;
143 extern pt_entry_t
*vm86pgtableva
;
145 vm86newptd
= vtophys((vaddr_t
)vmd
) | PG_V
| PG_RW
| PG_U
| PG_u
;
146 vm86pgtableva
= vmd
->pgtbl
;
147 vm86frame
= (struct trapframe
*)vmd
- 1;
151 kvm86_map(struct kvm86_data
*vmd
, paddr_t pa
, uint32_t vmva
)
154 vmd
->pgtbl
[vmva
>> 12] = pa
| PG_V
| PG_RW
| PG_U
| PG_u
;
158 kvm86_mapbios(struct kvm86_data
*vmd
)
162 /* map first physical page (vector table, BIOS data) */
163 kvm86_map(vmd
, 0, 0);
166 for (pa
= 0xa0000; pa
< 0x100000; pa
+= PAGE_SIZE
)
167 kvm86_map(vmd
, pa
, pa
);
171 kvm86_bios_addpage(uint32_t vmva
)
175 if (bioscallvmd
->pgtbl
[vmva
>> 12]) /* allocated? */
178 mem
= malloc(PAGE_SIZE
, M_DEVBUF
, M_NOWAIT
);
179 if ((u_long
)mem
& (PAGE_SIZE
- 1)) {
180 printf("kvm86_bios_addpage: unaligned");
183 kvm86_map(bioscallvmd
, vtophys((vaddr_t
)mem
), vmva
);
189 kvm86_bios_delpage(uint32_t vmva
, void *kva
)
192 bioscallvmd
->pgtbl
[vmva
>> 12] = 0;
197 kvm86_bios_read(uint32_t vmva
, char *buf
, size_t len
)
204 now
= min(todo
, PAGE_SIZE
- (vmva
& (PAGE_SIZE
- 1)));
206 if (!bioscallvmd
->pgtbl
[vmva
>> 12])
208 vmpa
= bioscallvmd
->pgtbl
[vmva
>> 12] & ~(PAGE_SIZE
- 1);
209 pmap_kenter_pa(bioscalltmpva
, vmpa
, VM_PROT_READ
, 0);
210 pmap_update(pmap_kernel());
212 memcpy(buf
, (void *)(bioscalltmpva
+ (vmva
& (PAGE_SIZE
- 1))),
222 kvm86_bioscall(int intno
, struct trapframe
*tf
)
224 static const unsigned char call
[] = {
233 mutex_enter(&kvm86_mp_lock
);
234 memcpy(bioscallscratchpage
, call
, sizeof(call
));
235 *((unsigned char *)bioscallscratchpage
+ 2) = intno
;
237 tf
->tf_eip
= BIOSCALLSCRATCHPAGE_VMVA
;
239 tf
->tf_esp
= BIOSCALLSCRATCHPAGE_VMVA
+ PAGE_SIZE
- 2;
241 tf
->tf_eflags
= PSL_USERSET
| PSL_VM
;
243 tf
->tf_eflags
|= PSL_IOPL
;
245 tf
->tf_ds
= tf
->tf_es
= tf
->tf_fs
= tf
->tf_gs
= 0;
247 kvm86_prepare(bioscallvmd
); /* XXX */
249 ret
= kvm86_call(tf
);
251 mutex_exit(&kvm86_mp_lock
);
256 kvm86_bioscall_simple(int intno
, struct bioscallregs
*r
)
261 memset(&tf
, 0, sizeof(struct trapframe
));
268 tf
.tf_vm86_es
= r
->ES
;
270 res
= kvm86_bioscall(intno
, &tf
);
278 r
->ES
= tf
.tf_vm86_es
;
279 r
->EFLAGS
= tf
.tf_eflags
;
285 kvm86_gpfault(struct trapframe
*tf
)
287 unsigned char *kva
, insn
, trapno
;
290 kva
= (unsigned char *)((tf
->tf_cs
<< 4) + tf
->tf_eip
);
293 printf("kvm86_gpfault: cs=%x, eip=%x, insn=%x, eflags=%x\n",
294 tf
->tf_cs
, tf
->tf_eip
, insn
, tf
->tf_eflags
);
297 KASSERT(tf
->tf_eflags
& PSL_VM
);
300 case 0xf4: /* HLT - normal exit */
303 case 0xcd: /* INTxx */
304 /* fake a return stack frame and call real mode handler */
306 sp
= (uint16_t *)((tf
->tf_ss
<< 4) + tf
->tf_esp
);
307 *(--sp
) = tf
->tf_eflags
;
309 *(--sp
) = tf
->tf_eip
+ 2;
311 tf
->tf_cs
= *(uint16_t *)(trapno
* 4 + 2);
312 tf
->tf_eip
= *(uint16_t *)(trapno
* 4);
314 case 0xcf: /* IRET */
315 sp
= (uint16_t *)((tf
->tf_ss
<< 4) + tf
->tf_esp
);
316 tf
->tf_eip
= *(sp
++);
318 tf
->tf_eflags
= *(sp
++);
320 tf
->tf_eflags
|= PSL_VM
; /* outside of 16bit flag reg */
322 #ifndef KVM86_IOPL3 /* XXX check VME? */
325 /* XXX ignore for now */
328 case 0x9c: /* PUSHF */
329 sp
= (uint16_t *)((tf
->tf_ss
<< 4) + tf
->tf_esp
);
330 *(--sp
) = tf
->tf_eflags
;
334 case 0x9d: /* POPF */
335 sp
= (uint16_t *)((tf
->tf_ss
<< 4) + tf
->tf_esp
);
336 tf
->tf_eflags
= *(sp
++);
339 tf
->tf_eflags
|= PSL_VM
; /* outside of 16bit flag reg */
344 printf("kvm86_gpfault: unhandled\n");
346 printf("kvm86_gpfault: cs=%x, eip=%x, insn=%x, eflags=%x\n",
347 tf
->tf_cs
, tf
->tf_eip
, insn
, tf
->tf_eflags
);
350 * signal error to caller