/*
 * Copyright (c) 2005-2008 Fabrice Bellard
 * Copyright (c) 2011 Stefan Weil
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
26 #include <sys/ioctl.h>
29 #include <sys/ioccom.h>
#ifdef DEBUG
# define LOG_INT(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
# define LOG_INT_STATE(env) log_cpu_state_mask(CPU_LOG_INT, (env), 0)
#else
# define LOG_INT(...) do { } while (0)
# define LOG_INT_STATE(env) do { } while (0)
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

static void qpi_init(void);
static int qpi_io_memory;
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
int kqemu_allowed = 0;
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;

uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
ram_addr_t kqemu_phys_ram_size;
uint8_t *kqemu_phys_ram_base;
#define cpuid(index, eax, ebx, ecx, edx) \
    asm volatile ("cpuid" \
                  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                  : "0" (index))

#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    /* CPUID is available iff the ID bit (21) of EFLAGS can be toggled */
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
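
/* Make the guest-visible CPUID feature bits track the host CPU for the
   features user code can rely on directly (CMOV, MMX, SSE, ...), so that
   code run natively under kqemu and code run by the emulator see the
   same feature set. */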
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performance
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
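
/* Open the kqemu device, check that the kernel module speaks the same
   protocol version, allocate the shared page lists and register them,
   together with the guest RAM layout, via the KQEMU_INIT ioctl.
   Returns 0 on success, -1 on failure (kqemu stays disabled). */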
int kqemu_init(CPUState *env)
{
    struct kqemu_init kinit;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
                KQEMU_DEVICE, GetLastError());
        return -1;
    }
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
#endif
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(uint64_t));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(uint64_t));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(uint64_t));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table =
        g_malloc0(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
    kinit.ram_base = kqemu_phys_ram_base;
    kinit.ram_size = kqemu_phys_ram_size;
    kinit.ram_dirty = phys_ram_dirty;
    kinit.pages_to_flush = pages_to_flush;
    kinit.ram_pages_to_update = ram_pages_to_update;
    kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;

    qpi_init();
    return 0;
}
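
/* Queue a single guest TLB page flush for the kqemu module; the queue
   degrades to a full flush (KQEMU_FLUSH_ALL) when it overflows. */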
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    LOG_INT("kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
void kqemu_flush(CPUState *env, int global)
{
    LOG_INT("kqemu_flush:\n");
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
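
/* Called when QEMU clears the dirty bits of a RAM page: queue the page
   so the kernel module can update its own dirty tracking. Pages that
   were not fully dirty (byte != 0xff) are already being tracked. */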
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    LOG_INT("kqemu_set_notdirty: addr=%08lx\n",
            (unsigned long)ram_addr);
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
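
/* Forget the queued list of guest-modified RAM pages and clear the
   corresponding entries in the lookup table. */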
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for (i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
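
/* Record that the guest modified a RAM page while running under kqemu.
   Each page is queued once (deduplicated via modified_ram_pages_table)
   and the queue is flushed to the kernel module when it fills up. */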
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush the queue to the kernel module */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
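
/* Tell the kqemu module how a physical address range is backed (RAM,
   ROM, the QPI communication page, or unassigned) so it can map guest
   physical memory directly. */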
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset)
{
    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
    uint64_t end;
    int ret, io_index;

    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    start_addr &= TARGET_PAGE_MASK;
    kphys_mem->phys_addr = start_addr;
    kphys_mem->size = end - start_addr;
    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
    io_index = phys_offset & ~TARGET_PAGE_MASK;
    switch (io_index) {
    case IO_MEM_RAM:
        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
        break;
    case IO_MEM_ROM:
        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
        break;
    default:
        if (qpi_io_memory == io_index) {
            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
        } else {
            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
        }
        break;
    }
#ifdef _WIN32
    {
        DWORD temp;
        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
                              kphys_mem, sizeof(*kphys_mem),
                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
    }
#else
    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
    if (ret < 0) {
        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64
                " size=0x%08lx phys_offset=0x%08lx\n",
                ret, start_addr,
                (unsigned long)size, (unsigned long)phys_offset);
    }
}
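
/* Memory images used by the FSAVE/FRSTOR and FXSAVE/FXRSTOR
   instructions; the field layouts (including the padding members)
   follow the hardware save-area formats. */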
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
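
/* Load the guest FPU/SSE state into the host FPU before entering kqemu
   and save it back afterwards. The fx variants are used when the CPU
   supports FXSR. Note that QEMU keeps fpus and fpstt split, while the
   hardware image stores the top-of-stack in FPUS bits 11-13. */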
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile ("fldcw %0" : : "m" (fpuc));
}
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile ("fldcw %0" : : "m" (fpuc));
}
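
/* Emulate the SYSCALL instruction on behalf of the kqemu module: load
   flat code/stack segments from the STAR selectors and set EIP from
   LSTAR/CSTAR (long mode) or STAR (legacy mode). */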
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
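
/* Optional profiling support: count the guest PCs that caused an exit
   to the soft MMU and dump a sorted histogram to /tmp/kqemu.stats. */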
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;

static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for (;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}

static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}

static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for (h = 0; h < PC_REC_HASH_SIZE; h++) {
        for (r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}

void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for (h = 0; h < PC_REC_HASH_SIZE; h++) {
        for (r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for (i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc, r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif
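
/* Copy segment descriptor caches between QEMU's SegmentCache and the
   kqemu_segment_cache structure shared with the kernel module. */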
static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
                                  const SegmentCache *sc)
{
    ksc->selector = sc->selector;
    ksc->flags = sc->flags;
    ksc->limit = sc->limit;
    ksc->base = sc->base;
}

static inline void kqemu_save_seg(SegmentCache *sc,
                                  const struct kqemu_segment_cache *ksc)
{
    sc->selector = ksc->selector;
    sc->flags = ksc->flags;
    sc->limit = ksc->limit;
    sc->base = ksc->base;
}
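
/* Main entry point: marshal the CPU state into a kqemu_cpu_state, hand
   it to the kernel module with the KQEMU_EXEC ioctl, then unmarshal the
   result and translate the module's return code into QEMU events
   (exceptions, interrupts, syscalls, soft MMU exits). */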
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    LOG_INT("kqemu: cpu_exec: enter\n");
    LOG_INT_STATE(env);
    for (i = 0; i < CPU_NB_REGS; i++)
        kenv->regs[i] = env->regs[i];
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    for (i = 0; i < 6; i++)
        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
    kqemu_load_seg(&kenv->ldt, &env->ldt);
    kqemu_load_seg(&kenv->tr, &env->tr);
    kqemu_load_seg(&kenv->gdt, &env->gdt);
    kqemu_load_seg(&kenv->idt, &env->idt);
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    kenv->efer = env->efer;
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
    nb_ram_pages_to_update = 0;
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;

    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    for (i = 0; i < CPU_NB_REGS; i++)
        env->regs[i] = kenv->regs[i];
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    for (i = 0; i < 6; i++)
        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
    cpu_x86_set_cpl(env, kenv->cpl);
    kqemu_save_seg(&env->ldt, &kenv->ldt);
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for (i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }

    if (kenv->nb_modified_ram_pages > 0) {
        for (i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   the translator. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
        LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index);
        LOG_INT_STATE(env);
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
        LOG_INT("kqemu: exception v=%02x e=%04x:\n",
                env->exception_index, env->error_code);
        LOG_INT_STATE(env);
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
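
/* Kick a CPU out of kqemu execution, e.g. to deliver an asynchronous
   interrupt. */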
void kqemu_cpu_interrupt(CPUState *env)
{
#ifdef _WIN32
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully. */
    CancelIo(kqemu_fd);
#endif
}
/*
   QEMU paravirtualization interface. The current interface only
   allows the IF and IOPL flags to be modified when running in
   kqemu.

   At this point it is not very satisfactory. I leave it for reference
   as it adds little complexity.
*/

#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
static uint32_t qpi_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t qpi_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void qpi_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void qpi_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t qpi_mem_readl(void *opaque, hwaddr addr)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return 0;
    return env->eflags & (IF_MASK | IOPL_MASK);
}

/* Note: after writing to this address, the guest code must make sure
   it is exiting the current TB. pushf/popf can be used for that
   purpose. */
static void qpi_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return;
    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
        (val & (IF_MASK | IOPL_MASK));
}
static CPUReadMemoryFunc * const qpi_mem_read[3] = {
    qpi_mem_readb,
    qpi_mem_readw,
    qpi_mem_readl,
};

static CPUWriteMemoryFunc * const qpi_mem_write[3] = {
    qpi_mem_writeb,
    qpi_mem_writew,
    qpi_mem_writel,
};
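
/* Map the 4 KB QPI communication page at the fixed physical address and
   publish its address in kqemu_comm_base (low bit set, presumably as an
   "enabled" marker for the guest). */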
static void qpi_init(void)
{
    kqemu_comm_base = 0xff000000 | 1;
    qpi_io_memory = cpu_register_io_memory(qpi_mem_read,
                                           qpi_mem_write, NULL);
    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
                                 0x1000, qpi_io_memory);
}