/*
 * Copyright (c) 2018-2021 Maxime Villard, m00nbsd.net
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
37 #include <sys/types.h>
39 #include <machine/segments.h>
40 #include <machine/psl.h>
46 #include <machine/pte.h>
47 #define PAGE_SIZE 4096
51 #include <machine/pmap.h>
52 #define PTE_P X86_PG_V /* 0x001: P (Valid) */
53 #define PTE_W X86_PG_RW /* 0x002: R/W (Read/Write) */
54 #define PSL_MBO PSL_RESERVED_DEFAULT /* 0x00000002 */
55 #define SDT_SYS386BSY SDT_SYSBSY /* 11: system 64-bit TSS busy */
57 #endif /* __NetBSD__ */
59 static uint8_t mmiobuf
[PAGE_SIZE
];
60 static uint8_t *instbuf
;
62 /* -------------------------------------------------------------------------- */
65 mem_callback(struct nvmm_mem
*mem
)
69 if (mem
->gpa
< 0x1000 || mem
->gpa
+ mem
->size
> 0x1000 + PAGE_SIZE
) {
70 printf("Out of page\n");
74 off
= mem
->gpa
- 0x1000;
76 printf("-> gpa = %p\n", (void *)mem
->gpa
);
79 memcpy(mmiobuf
+ off
, mem
->data
, mem
->size
);
81 memcpy(mem
->data
, mmiobuf
+ off
, mem
->size
);
/*
 * Ask libnvmm to decode and emulate the faulting guest memory access;
 * the actual data transfer is performed by mem_callback().
 */
static void
handle_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
{
	int ret;

	ret = nvmm_assist_mem(mach, vcpu);
	if (ret == -1) {
		err(errno, "nvmm_assist_mem");
	}
}
99 run_machine(struct nvmm_machine
*mach
, struct nvmm_vcpu
*vcpu
)
101 struct nvmm_vcpu_exit
*exit
= vcpu
->exit
;
104 if (nvmm_vcpu_run(mach
, vcpu
) == -1)
105 err(errno
, "nvmm_vcpu_run");
107 switch (exit
->reason
) {
108 case NVMM_VCPU_EXIT_NONE
:
111 case NVMM_VCPU_EXIT_RDMSR
:
115 case NVMM_VCPU_EXIT_MEMORY
:
116 handle_memory(mach
, vcpu
);
119 case NVMM_VCPU_EXIT_SHUTDOWN
:
120 printf("Shutting down!\n");
124 printf("Invalid VMEXIT: 0x%lx\n", exit
->reason
);
130 static struct nvmm_assist_callbacks callbacks
= {
135 /* -------------------------------------------------------------------------- */
146 run_test(struct nvmm_machine
*mach
, struct nvmm_vcpu
*vcpu
,
147 const struct test
*test
)
152 size
= (size_t)test
->code_end
- (size_t)test
->code_begin
;
154 memset(mmiobuf
, 0, PAGE_SIZE
);
155 memcpy(instbuf
, test
->code_begin
, size
);
157 run_machine(mach
, vcpu
);
159 res
= (uint64_t *)(mmiobuf
+ test
->off
);
160 if (*res
== test
->wanted
) {
161 printf("Test '%s' passed\n", test
->name
);
163 printf("Test '%s' failed, wanted 0x%lx, got 0x%lx\n", test
->name
,
165 errx(-1, "run_test failed");
169 /* -------------------------------------------------------------------------- */
171 extern uint8_t test1_begin
, test1_end
;
172 extern uint8_t test2_begin
, test2_end
;
173 extern uint8_t test3_begin
, test3_end
;
174 extern uint8_t test4_begin
, test4_end
;
175 extern uint8_t test5_begin
, test5_end
;
176 extern uint8_t test6_begin
, test6_end
;
177 extern uint8_t test7_begin
, test7_end
;
178 extern uint8_t test8_begin
, test8_end
;
179 extern uint8_t test9_begin
, test9_end
;
180 extern uint8_t test10_begin
, test10_end
;
181 extern uint8_t test11_begin
, test11_end
;
182 extern uint8_t test12_begin
, test12_end
;
183 extern uint8_t test13_begin
, test13_end
;
184 extern uint8_t test14_begin
, test14_end
;
185 extern uint8_t test_64bit_15_begin
, test_64bit_15_end
;
186 extern uint8_t test_64bit_16_begin
, test_64bit_16_end
;
/*
 * 64-bit emulation test vectors. Each entry: a name, the guest code blob
 * (begin/end symbols from the companion assembly file), the 64-bit value
 * expected in the MMIO backing buffer, and the offset within that buffer.
 * The list is terminated by a NULL-named sentinel.
 * NOTE(review): the 'wanted' value of test16 is not visible in this
 * extract — confirm against the original file.
 */
188 static const struct test tests64
[] = {
189 { "64bit test1 - MOV", &test1_begin
, &test1_end
, 0x3004, 0 },
190 { "64bit test2 - OR", &test2_begin
, &test2_end
, 0x16FF, 0 },
191 { "64bit test3 - AND", &test3_begin
, &test3_end
, 0x1FC0, 0 },
192 { "64bit test4 - XOR", &test4_begin
, &test4_end
, 0x10CF, 0 },
193 { "64bit test5 - Address Sizes", &test5_begin
, &test5_end
, 0x1F00, 0 },
194 { "64bit test6 - DMO", &test6_begin
, &test6_end
, 0xFFAB, 0 },
195 { "64bit test7 - STOS", &test7_begin
, &test7_end
, 0x00123456, 0 },
196 { "64bit test8 - LODS", &test8_begin
, &test8_end
, 0x12345678, 0 },
197 { "64bit test9 - MOVS", &test9_begin
, &test9_end
, 0x12345678, 0 },
198 { "64bit test10 - MOVZXB", &test10_begin
, &test10_end
, 0x00000078, 0 },
199 { "64bit test11 - MOVZXW", &test11_begin
, &test11_end
, 0x00005678, 0 },
200 { "64bit test12 - CMP", &test12_begin
, &test12_end
, 0x00000001, 0 },
201 { "64bit test13 - SUB", &test13_begin
, &test13_end
, 0x0000000F0000A0FF, 0 },
202 { "64bit test14 - TEST", &test14_begin
, &test14_end
, 0x00000001, 0 },
203 { "64bit test15 - XCHG", &test_64bit_15_begin
, &test_64bit_15_end
, 0x123456, 0 },
204 { "64bit test16 - XCHG", &test_64bit_16_begin
, &test_64bit_16_end
,
/* Sentinel: terminates the iteration in test_64(). */
206 { NULL
, NULL
, NULL
, -1, 0 }
/*
 * Initialize one segment descriptor in the x64 VCPU state: descriptor
 * type, the S bit derived from it (bit 4 set => code/data segment rather
 * than system segment), a 64KB limit and a zero base.
 * NOTE(review): the lines assigning selector/dpl/p/avl/l/def/g are
 * missing from this extract — confirm against the original file.
 */
210 init_seg(struct nvmm_x64_state_seg
*seg
, int type
, int sel
)
213 seg
->attrib
.type
= type
;
/* S=1 for code/data descriptor types (bit 4 of the type field). */
214 seg
->attrib
.s
= (type
& 0b10000) != 0;
221 seg
->limit
= 0x0000FFFF;
222 seg
->base
= 0x00000000;
226 reset_machine64(struct nvmm_machine
*mach
, struct nvmm_vcpu
*vcpu
)
228 struct nvmm_x64_state
*state
= vcpu
->state
;
230 if (nvmm_vcpu_getstate(mach
, vcpu
, NVMM_X64_STATE_ALL
) == -1)
231 err(errno
, "nvmm_vcpu_getstate");
233 memset(state
, 0, sizeof(*state
));
236 state
->gprs
[NVMM_X64_GPR_RFLAGS
] = PSL_MBO
;
237 init_seg(&state
->segs
[NVMM_X64_SEG_CS
], SDT_MEMERA
, GSEL(GCODE_SEL
, SEL_KPL
));
238 init_seg(&state
->segs
[NVMM_X64_SEG_SS
], SDT_MEMRWA
, GSEL(GDATA_SEL
, SEL_KPL
));
239 init_seg(&state
->segs
[NVMM_X64_SEG_DS
], SDT_MEMRWA
, GSEL(GDATA_SEL
, SEL_KPL
));
240 init_seg(&state
->segs
[NVMM_X64_SEG_ES
], SDT_MEMRWA
, GSEL(GDATA_SEL
, SEL_KPL
));
241 init_seg(&state
->segs
[NVMM_X64_SEG_FS
], SDT_MEMRWA
, GSEL(GDATA_SEL
, SEL_KPL
));
242 init_seg(&state
->segs
[NVMM_X64_SEG_GS
], SDT_MEMRWA
, GSEL(GDATA_SEL
, SEL_KPL
));
245 init_seg(&state
->segs
[NVMM_X64_SEG_GDT
], 0, 0);
246 init_seg(&state
->segs
[NVMM_X64_SEG_IDT
], 0, 0);
247 init_seg(&state
->segs
[NVMM_X64_SEG_LDT
], SDT_SYSLDT
, 0);
248 init_seg(&state
->segs
[NVMM_X64_SEG_TR
], SDT_SYS386BSY
, 0);
250 /* Protected mode enabled. */
251 state
->crs
[NVMM_X64_CR_CR0
] = CR0_PG
|CR0_PE
|CR0_NE
|CR0_TS
|CR0_MP
|CR0_WP
|CR0_AM
;
253 /* 64bit mode enabled. */
254 state
->crs
[NVMM_X64_CR_CR4
] = CR4_PAE
;
255 state
->msrs
[NVMM_X64_MSR_EFER
] = EFER_LME
| EFER_SCE
| EFER_LMA
;
257 /* Stolen from x86/pmap.c */
258 #define PATENTRY(n, type) (type << ((n) * 8))
259 #define PAT_UC 0x0ULL
260 #define PAT_WC 0x1ULL
261 #define PAT_WT 0x4ULL
262 #define PAT_WP 0x5ULL
263 #define PAT_WB 0x6ULL
264 #define PAT_UCMINUS 0x7ULL
265 state
->msrs
[NVMM_X64_MSR_PAT
] =
266 PATENTRY(0, PAT_WB
) | PATENTRY(1, PAT_WT
) |
267 PATENTRY(2, PAT_UCMINUS
) | PATENTRY(3, PAT_UC
) |
268 PATENTRY(4, PAT_WB
) | PATENTRY(5, PAT_WT
) |
269 PATENTRY(6, PAT_UCMINUS
) | PATENTRY(7, PAT_UC
);
272 state
->crs
[NVMM_X64_CR_CR3
] = 0x3000;
274 state
->gprs
[NVMM_X64_GPR_RIP
] = 0x2000;
276 if (nvmm_vcpu_setstate(mach
, vcpu
, NVMM_X64_STATE_ALL
) == -1)
277 err(errno
, "nvmm_vcpu_setstate");
281 map_pages64(struct nvmm_machine
*mach
)
283 pt_entry_t
*L4
, *L3
, *L2
, *L1
;
286 instbuf
= mmap(NULL
, PAGE_SIZE
, PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
,
288 if (instbuf
== MAP_FAILED
)
291 if (nvmm_hva_map(mach
, (uintptr_t)instbuf
, PAGE_SIZE
) == -1)
292 err(errno
, "nvmm_hva_map");
293 ret
= nvmm_gpa_map(mach
, (uintptr_t)instbuf
, 0x2000, PAGE_SIZE
,
294 PROT_READ
|PROT_EXEC
);
296 err(errno
, "nvmm_gpa_map");
298 L4
= mmap(NULL
, PAGE_SIZE
, PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
,
300 if (L4
== MAP_FAILED
)
302 L3
= mmap(NULL
, PAGE_SIZE
, PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
,
304 if (L3
== MAP_FAILED
)
306 L2
= mmap(NULL
, PAGE_SIZE
, PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
,
308 if (L2
== MAP_FAILED
)
310 L1
= mmap(NULL
, PAGE_SIZE
, PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
,
312 if (L1
== MAP_FAILED
)
315 if (nvmm_hva_map(mach
, (uintptr_t)L4
, PAGE_SIZE
) == -1)
316 err(errno
, "nvmm_hva_map");
317 if (nvmm_hva_map(mach
, (uintptr_t)L3
, PAGE_SIZE
) == -1)
318 err(errno
, "nvmm_hva_map");
319 if (nvmm_hva_map(mach
, (uintptr_t)L2
, PAGE_SIZE
) == -1)
320 err(errno
, "nvmm_hva_map");
321 if (nvmm_hva_map(mach
, (uintptr_t)L1
, PAGE_SIZE
) == -1)
322 err(errno
, "nvmm_hva_map");
324 ret
= nvmm_gpa_map(mach
, (uintptr_t)L4
, 0x3000, PAGE_SIZE
,
325 PROT_READ
|PROT_WRITE
);
327 err(errno
, "nvmm_gpa_map");
328 ret
= nvmm_gpa_map(mach
, (uintptr_t)L3
, 0x4000, PAGE_SIZE
,
329 PROT_READ
|PROT_WRITE
);
331 err(errno
, "nvmm_gpa_map");
332 ret
= nvmm_gpa_map(mach
, (uintptr_t)L2
, 0x5000, PAGE_SIZE
,
333 PROT_READ
|PROT_WRITE
);
335 err(errno
, "nvmm_gpa_map");
336 ret
= nvmm_gpa_map(mach
, (uintptr_t)L1
, 0x6000, PAGE_SIZE
,
337 PROT_READ
|PROT_WRITE
);
339 err(errno
, "nvmm_gpa_map");
341 memset(L4
, 0, PAGE_SIZE
);
342 memset(L3
, 0, PAGE_SIZE
);
343 memset(L2
, 0, PAGE_SIZE
);
344 memset(L1
, 0, PAGE_SIZE
);
346 L4
[0] = PTE_P
| PTE_W
| 0x4000;
347 L3
[0] = PTE_P
| PTE_W
| 0x5000;
348 L2
[0] = PTE_P
| PTE_W
| 0x6000;
349 L1
[0x2000 / PAGE_SIZE
] = PTE_P
| PTE_W
| 0x2000;
350 L1
[0x1000 / PAGE_SIZE
] = PTE_P
| PTE_W
| 0x1000;
354 * 0x1000: MMIO address, unmapped
355 * 0x2000: Instructions, mapped
364 struct nvmm_machine mach
;
365 struct nvmm_vcpu vcpu
;
368 if (nvmm_machine_create(&mach
) == -1)
369 err(errno
, "nvmm_machine_create");
370 if (nvmm_vcpu_create(&mach
, 0, &vcpu
) == -1)
371 err(errno
, "nvmm_vcpu_create");
372 nvmm_vcpu_configure(&mach
, &vcpu
, NVMM_VCPU_CONF_CALLBACKS
, &callbacks
);
375 for (i
= 0; tests64
[i
].name
!= NULL
; i
++) {
376 reset_machine64(&mach
, &vcpu
);
377 run_test(&mach
, &vcpu
, &tests64
[i
]);
380 if (nvmm_vcpu_destroy(&mach
, &vcpu
) == -1)
381 err(errno
, "nvmm_vcpu_destroy");
382 if (nvmm_machine_destroy(&mach
) == -1)
383 err(errno
, "nvmm_machine_destroy");
386 /* -------------------------------------------------------------------------- */
388 extern uint8_t test_16bit_1_begin
, test_16bit_1_end
;
389 extern uint8_t test_16bit_2_begin
, test_16bit_2_end
;
390 extern uint8_t test_16bit_3_begin
, test_16bit_3_end
;
391 extern uint8_t test_16bit_4_begin
, test_16bit_4_end
;
392 extern uint8_t test_16bit_5_begin
, test_16bit_5_end
;
393 extern uint8_t test_16bit_6_begin
, test_16bit_6_end
;
395 static const struct test tests16
[] = {
396 { "16bit test1 - MOV single", &test_16bit_1_begin
, &test_16bit_1_end
,
397 0x023, 0x10f1 - 0x1000 },
398 { "16bit test2 - MOV dual", &test_16bit_2_begin
, &test_16bit_2_end
,
399 0x123, 0x10f3 - 0x1000 },
400 { "16bit test3 - MOV dual+disp", &test_16bit_3_begin
, &test_16bit_3_end
,
401 0x678, 0x10f1 - 0x1000 },
402 { "16bit test4 - Mixed", &test_16bit_4_begin
, &test_16bit_4_end
,
403 0x1011, 0x10f6 - 0x1000 },
404 { "16bit test5 - disp16-only", &test_16bit_5_begin
, &test_16bit_5_end
,
405 0x12, 0x1234 - 0x1000 },
406 { "16bit test6 - XCHG", &test_16bit_6_begin
, &test_16bit_6_end
,
407 0x1234, 0x1234 - 0x1000 },
408 { NULL
, NULL
, NULL
, -1, -1 }
412 reset_machine16(struct nvmm_machine
*mach
, struct nvmm_vcpu
*vcpu
)
414 struct nvmm_x64_state
*state
= vcpu
->state
;
416 if (nvmm_vcpu_getstate(mach
, vcpu
, NVMM_X64_STATE_ALL
) == -1)
417 err(errno
, "nvmm_vcpu_getstate");
419 state
->segs
[NVMM_X64_SEG_CS
].base
= 0;
420 state
->segs
[NVMM_X64_SEG_CS
].limit
= 0x2FFF;
421 state
->gprs
[NVMM_X64_GPR_RIP
] = 0x2000;
423 if (nvmm_vcpu_setstate(mach
, vcpu
, NVMM_X64_STATE_ALL
) == -1)
424 err(errno
, "nvmm_vcpu_setstate");
428 map_pages16(struct nvmm_machine
*mach
)
432 instbuf
= mmap(NULL
, PAGE_SIZE
, PROT_READ
|PROT_WRITE
, MAP_ANON
|MAP_PRIVATE
,
434 if (instbuf
== MAP_FAILED
)
437 if (nvmm_hva_map(mach
, (uintptr_t)instbuf
, PAGE_SIZE
) == -1)
438 err(errno
, "nvmm_hva_map");
439 ret
= nvmm_gpa_map(mach
, (uintptr_t)instbuf
, 0x2000, PAGE_SIZE
,
440 PROT_READ
|PROT_EXEC
);
442 err(errno
, "nvmm_gpa_map");
446 * 0x1000: MMIO address, unmapped
447 * 0x2000: Instructions, mapped
452 struct nvmm_machine mach
;
453 struct nvmm_vcpu vcpu
;
456 if (nvmm_machine_create(&mach
) == -1)
457 err(errno
, "nvmm_machine_create");
458 if (nvmm_vcpu_create(&mach
, 0, &vcpu
) == -1)
459 err(errno
, "nvmm_vcpu_create");
460 nvmm_vcpu_configure(&mach
, &vcpu
, NVMM_VCPU_CONF_CALLBACKS
, &callbacks
);
463 for (i
= 0; tests16
[i
].name
!= NULL
; i
++) {
464 reset_machine16(&mach
, &vcpu
);
465 run_test(&mach
, &vcpu
, &tests16
[i
]);
468 if (nvmm_vcpu_destroy(&mach
, &vcpu
) == -1)
469 err(errno
, "nvmm_vcpu_destroy");
470 if (nvmm_machine_destroy(&mach
) == -1)
471 err(errno
, "nvmm_machine_destroy");
474 /* -------------------------------------------------------------------------- */
476 int main(int argc
, char *argv
[])
478 if (nvmm_init() == -1)
479 err(errno
, "nvmm_init");