/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
         log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
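/*
 * Added note (not from the original file): parity_table[v] holds CC_P when
 * the 8-bit value v has an even number of set bits, which is exactly how
 * the x86 PF flag is defined.  A minimal sketch of a generator for such a
 * table (hypothetical helper, for illustration only):
 *
 *   static uint8_t parity(uint8_t v)
 *   {
 *       v ^= v >> 4;
 *       v ^= v >> 2;
 *       v ^= v >> 1;
 *       return (v & 1) ? 0 : CC_P;
 *   }
 */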
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
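/*
 * Added note (not from the original file): RCL rotates through the carry
 * flag, so a 16-bit RCL effectively operates on 17 bits and an 8-bit RCL
 * on 9 bits.  rclw_table and rclb_table therefore reduce the 5-bit rotate
 * count modulo 17 and modulo 9 respectively, e.g. rclb_table[10] == 1.
 */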
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
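/*
 * Added note (not from the original file): e1/e2 are the two 32-bit words
 * of a segment descriptor.  The 32-bit base is scattered over e1[31:16],
 * e2[7:0] and e2[31:24], and the 20-bit limit over e1[15:0] and e2[19:16];
 * when the granularity bit (DESC_G_MASK) is set the limit is counted in
 * 4 KiB pages, hence the "(limit << 12) | 0xfff" above.
 */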
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
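/*
 * Added note (not from the original file): in vm86 (and real) mode a
 * segment has no descriptor, so the cached base is simply selector * 16
 * and the limit is fixed at 64 KiB; e.g. selector 0xb800 yields base
 * 0xb8000.
 */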
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
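/*
 * Added note (not from the original file): in a 32-bit TSS the ring 0-2
 * stack pointers are stored as {ESPn, SSn} pairs of 4+4 bytes starting at
 * offset 4, and in a 16-bit TSS as 2+2 byte pairs starting at offset 2,
 * which is what "index = (dpl * 4 + 2) << shift" computes.
 */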
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
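/*
 * Added note (not from the original file): the SWITCH_TSS_* values encode
 * how the task switch was started, because the side effects differ: JMP
 * and IRET clear the busy bit of the outgoing TSS, JMP and CALL set the
 * busy bit of the incoming TSS, and only CALL stores a back link and sets
 * NT in the new EFLAGS (see switch_tss() below).
 */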
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;
    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
364 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
367 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
368 e2
= ldl_kernel(ptr
+ 4);
369 e2
&= ~DESC_TSS_BUSY_MASK
;
370 stl_kernel(ptr
+ 4, e2
);
372 old_eflags
= compute_eflags();
373 if (source
== SWITCH_TSS_IRET
)
374 old_eflags
&= ~NT_MASK
;
376 /* save the current state in the old TSS */
379 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
380 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
381 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
382 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
383 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
384 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
385 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
386 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
387 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
388 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
389 for(i
= 0; i
< 6; i
++)
390 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
393 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
394 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
395 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
396 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
397 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
398 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
399 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
400 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
401 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
402 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
403 for(i
= 0; i
< 4; i
++)
404 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
407 /* now if an exception occurs, it will occurs in the next task
410 if (source
== SWITCH_TSS_CALL
) {
411 stw_kernel(tss_base
, env
->tr
.selector
);
412 new_eflags
|= NT_MASK
;
    /* set the busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
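/*
 * Added note (not from the original file): the word at offset 0x66 of a
 * 32-bit TSS holds the base of the I/O permission bitmap, one bit per port
 * (bit set = access denied).  check_io() reads the 16-bit chunk covering
 * the first port and requires all 'size' bits to be clear, so e.g. a
 * 4-byte access to port 0x3f8 tests the bits for ports 0x3f8..0x3fb.
 */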
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
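/*
 * Added note (not from the original file): PUSHW/PUSHL/POPW/POPL update a
 * local 'sp' copy and address memory through the segment base 'ssp',
 * masking the stack pointer with 'sp_mask' so 16-bit stacks wrap at 64 KiB.
 * A minimal usage sketch mirroring the interrupt code below:
 *
 *   sp_mask = get_sp_mask(env->segs[R_SS].flags);
 *   ssp = env->segs[R_SS].base;
 *   esp = ESP;
 *   PUSHL(ssp, esp, sp_mask, error_code);
 *   SET_ESP(esp, sp_mask);   // commit the new stack pointer at the end
 */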
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
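/*
 * Added note (not from the original file): the 64-bit TSS stores RSP0-2 at
 * offsets 4, 12 and 20 and IST1-7 at offsets 36..84, each as an 8-byte
 * value, so "8 * level + 4" addresses RSPn for level 0-2 and ISTn when the
 * caller passes ist + 3 as the level (as do_interrupt64() does below).
 */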
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e1 & 7;

    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
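/*
 * Added note (not from the original file): SYSCALL takes its selectors
 * from the STAR MSR: bits 47:32 hold the kernel CS (SS is that value + 8),
 * the 64-bit entry point comes from LSTAR (or CSTAR for a 32-bit caller),
 * and FMASK lists the RFLAGS bits to clear on entry.  RCX receives the
 * return RIP and R11 the saved RFLAGS, which is what SYSRET consumes below.
 */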
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
}
#endif
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        if (qemu_loglevel_mask(CPU_LOG_RESET))
            fprintf(logfile, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
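/*
 * Added note (not from the original file): this implements the x86 fault
 * classification rules: vectors 0 and 10-13 are "contributory", so a
 * contributory fault raised while delivering another contributory fault
 * (or a page fault followed by a contributory/page fault) escalates to
 * #DF (vector 8), and a fault while delivering #DF becomes a triple fault,
 * i.e. a CPU reset.
 */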
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
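/*
 * Added note (not from the original file): on SMM entry the CPU dumps its
 * state into SMRAM; the save area used below starts at smbase + 0x8000 and
 * the individual fields sit at offsets 0x7e00-0x7ff8 within it (i.e. at
 * smbase + 0xfe00 and up).  The revision ID stored at offset 0x7efc also
 * advertises, via bit 17, that the SMBASE relocation field written at
 * 0x7f00/0x7ef8 is valid, which is what helper_rsm() checks on resume.
 */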
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}
void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}
void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
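/*
 * Added note (not from the original file): AAM splits AL by the given base
 * (AH = AL / base, AL = AL % base) and AAD recombines the two halves
 * (AL = AH * base + AL, AH = 0).  With the default base 10, AAM applied to
 * AL = 0x2d (45) yields AH = 4, AL = 5.  A zero base in AAM would divide by
 * zero, which is the "XXX: exception" case noted above.
 */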
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
2359 /* check gate type */
2360 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2361 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2364 case 1: /* available 286 TSS */
2365 case 9: /* available 386 TSS */
2366 case 5: /* task gate */
2367 if (dpl
< cpl
|| dpl
< rpl
)
2368 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2369 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2370 CC_OP
= CC_OP_EFLAGS
;
2372 case 4: /* 286 call gate */
2373 case 12: /* 386 call gate */
2376 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2381 if (dpl
< cpl
|| dpl
< rpl
)
2382 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2383 /* check valid bit */
2384 if (!(e2
& DESC_P_MASK
))
2385 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2386 selector
= e1
>> 16;
2387 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2388 param_count
= e2
& 0x1f;
2389 if ((selector
& 0xfffc) == 0)
2390 raise_exception_err(EXCP0D_GPF
, 0);
2392 if (load_segment(&e1
, &e2
, selector
) != 0)
2393 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2394 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2395 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2396 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2398 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2399 if (!(e2
& DESC_P_MASK
))
2400 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2402 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2403 /* to inner privilege */
2404 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2405 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2406 ss
, sp
, param_count
, ESP
);
2407 if ((ss
& 0xfffc) == 0)
2408 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2409 if ((ss
& 3) != dpl
)
2410 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2411 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2412 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2413 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2415 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2416 if (!(ss_e2
& DESC_S_MASK
) ||
2417 (ss_e2
& DESC_CS_MASK
) ||
2418 !(ss_e2
& DESC_W_MASK
))
2419 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2420 if (!(ss_e2
& DESC_P_MASK
))
2421 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2423 // push_size = ((param_count * 2) + 8) << shift;
2425 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2426 old_ssp
= env
->segs
[R_SS
].base
;
2428 sp_mask
= get_sp_mask(ss_e2
);
2429 ssp
= get_seg_base(ss_e1
, ss_e2
);
2431 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2432 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2433 for(i
= param_count
- 1; i
>= 0; i
--) {
2434 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2435 PUSHL(ssp
, sp
, sp_mask
, val
);
2438 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2439 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2440 for(i
= param_count
- 1; i
>= 0; i
--) {
2441 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2442 PUSHW(ssp
, sp
, sp_mask
, val
);
2447 /* to same privilege */
2449 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2450 ssp
= env
->segs
[R_SS
].base
;
2451 // push_size = (4 << shift);
2456 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2457 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2459 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2460 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2463 /* from this point, not restartable */
2466 ss
= (ss
& ~3) | dpl
;
2467 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2469 get_seg_limit(ss_e1
, ss_e2
),
2473 selector
= (selector
& ~3) | dpl
;
2474 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2475 get_seg_base(e1
, e2
),
2476 get_seg_limit(e1
, e2
),
2478 cpu_x86_set_cpl(env
, dpl
);
2479 SET_ESP(sp
, sp_mask
);
2483 if (kqemu_is_ok(env
)) {
2484 env
->exception_index
= -1;
2490 /* real and vm86 mode iret */
2491 void helper_iret_real(int shift
)
2493 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2497 sp_mask
= 0xffff; /* XXXX: use SS segment size ? */
2499 ssp
= env
->segs
[R_SS
].base
;
2502 POPL(ssp
, sp
, sp_mask
, new_eip
);
2503 POPL(ssp
, sp
, sp_mask
, new_cs
);
2505 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2508 POPW(ssp
, sp
, sp_mask
, new_eip
);
2509 POPW(ssp
, sp
, sp_mask
, new_cs
);
2510 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2512 ESP
= (ESP
& ~sp_mask
) | (sp
& sp_mask
);
2513 env
->segs
[R_CS
].selector
= new_cs
;
2514 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2516 if (env
->eflags
& VM_MASK
)
2517 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
| NT_MASK
;
2519 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
| RF_MASK
| NT_MASK
;
2521 eflags_mask
&= 0xffff;
2522 load_eflags(new_eflags
, eflags_mask
);
2523 env
->hflags2
&= ~HF2_NMI_MASK
;
2526 static inline void validate_seg(int seg_reg
, int cpl
)
2531 /* XXX: on x86_64, we do not want to nullify FS and GS because
2532 they may still contain a valid base. I would be interested to
2533 know how a real x86_64 CPU behaves */
2534 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2535 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0)
2538 e2
= env
->segs
[seg_reg
].flags
;
2539 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2540 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2541 /* data or non conforming code segment */
2543 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2548 /* protected mode iret */
2549 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2551 uint32_t new_cs
, new_eflags
, new_ss
;
2552 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2553 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2554 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2555 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2557 #ifdef TARGET_X86_64
2562 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2564 ssp
= env
->segs
[R_SS
].base
;
2565 new_eflags
= 0; /* avoid warning */
2566 #ifdef TARGET_X86_64
2572 POPQ(sp
, new_eflags
);
2578 POPL(ssp
, sp
, sp_mask
, new_eip
);
2579 POPL(ssp
, sp
, sp_mask
, new_cs
);
2582 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2583 if (new_eflags
& VM_MASK
)
2584 goto return_to_vm86
;
2588 POPW(ssp
, sp
, sp_mask
, new_eip
);
2589 POPW(ssp
, sp
, sp_mask
, new_cs
);
2591 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2593 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2594 new_cs
, new_eip
, shift
, addend
);
2595 LOG_PCALL_STATE(env
);
2596 if ((new_cs
& 0xfffc) == 0)
2597 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2598 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2599 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2600 if (!(e2
& DESC_S_MASK
) ||
2601 !(e2
& DESC_CS_MASK
))
2602 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2603 cpl
= env
->hflags
& HF_CPL_MASK
;
2606 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2607 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2608 if (e2
& DESC_C_MASK
) {
2610 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2613 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2615 if (!(e2
& DESC_P_MASK
))
2616 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2619 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2620 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2621 /* return to same privilege level */
2622 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2623 get_seg_base(e1
, e2
),
2624 get_seg_limit(e1
, e2
),
2627 /* return to different privilege level */
2628 #ifdef TARGET_X86_64
2637 POPL(ssp
, sp
, sp_mask
, new_esp
);
2638 POPL(ssp
, sp
, sp_mask
, new_ss
);
2642 POPW(ssp
, sp
, sp_mask
, new_esp
);
2643 POPW(ssp
, sp
, sp_mask
, new_ss
);
2645 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2647 if ((new_ss
& 0xfffc) == 0) {
2648 #ifdef TARGET_X86_64
2649 /* NULL ss is allowed in long mode if cpl != 3*/
2650 /* XXX: test CS64 ? */
2651 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2652 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2654 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2655 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2656 DESC_W_MASK
| DESC_A_MASK
);
2657 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2661 raise_exception_err(EXCP0D_GPF
, 0);
2664 if ((new_ss
& 3) != rpl
)
2665 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2666 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2667 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2668 if (!(ss_e2
& DESC_S_MASK
) ||
2669 (ss_e2
& DESC_CS_MASK
) ||
2670 !(ss_e2
& DESC_W_MASK
))
2671 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2672 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2674 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2675 if (!(ss_e2
& DESC_P_MASK
))
2676 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2677 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2678 get_seg_base(ss_e1
, ss_e2
),
2679 get_seg_limit(ss_e1
, ss_e2
),
2683 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2684 get_seg_base(e1
, e2
),
2685 get_seg_limit(e1
, e2
),
2687 cpu_x86_set_cpl(env
, rpl
);
2689 #ifdef TARGET_X86_64
2690 if (env
->hflags
& HF_CS64_MASK
)
2694 sp_mask
= get_sp_mask(ss_e2
);
2696 /* validate data segments */
2697 validate_seg(R_ES
, rpl
);
2698 validate_seg(R_DS
, rpl
);
2699 validate_seg(R_FS
, rpl
);
2700 validate_seg(R_GS
, rpl
);
2704 SET_ESP(sp
, sp_mask
);
2707 /* NOTE: 'cpl' is the _old_ CPL */
2708 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2710 eflags_mask
|= IOPL_MASK
;
2711 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2713 eflags_mask
|= IF_MASK
;
2715 eflags_mask
&= 0xffff;
2716 load_eflags(new_eflags
, eflags_mask
);
2721 POPL(ssp
, sp
, sp_mask
, new_esp
);
2722 POPL(ssp
, sp
, sp_mask
, new_ss
);
2723 POPL(ssp
, sp
, sp_mask
, new_es
);
2724 POPL(ssp
, sp
, sp_mask
, new_ds
);
2725 POPL(ssp
, sp
, sp_mask
, new_fs
);
2726 POPL(ssp
, sp
, sp_mask
, new_gs
);
2728 /* modify processor state */
2729 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2730 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2731 load_seg_vm(R_CS
, new_cs
& 0xffff);
2732 cpu_x86_set_cpl(env
, 3);
2733 load_seg_vm(R_SS
, new_ss
& 0xffff);
2734 load_seg_vm(R_ES
, new_es
& 0xffff);
2735 load_seg_vm(R_DS
, new_ds
& 0xffff);
2736 load_seg_vm(R_FS
, new_fs
& 0xffff);
2737 load_seg_vm(R_GS
, new_gs
& 0xffff);
2739 env
->eip
= new_eip
& 0xffff;
2743 void helper_iret_protected(int shift
, int next_eip
)
2745 int tss_selector
, type
;
2748 /* specific case for TSS */
2749 if (env
->eflags
& NT_MASK
) {
2750 #ifdef TARGET_X86_64
2751 if (env
->hflags
& HF_LMA_MASK
)
2752 raise_exception_err(EXCP0D_GPF
, 0);
2754 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2755 if (tss_selector
& 4)
2756 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2757 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2758 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2759 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2760 /* NOTE: we check both segment and busy TSS */
2762 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2763 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2765 helper_ret_protected(shift
, 1, 0);
2767 env
->hflags2
&= ~HF2_NMI_MASK
;
2769 if (kqemu_is_ok(env
)) {
2770 CC_OP
= CC_OP_EFLAGS
;
2771 env
->exception_index
= -1;
2777 void helper_lret_protected(int shift
, int addend
)
2779 helper_ret_protected(shift
, 0, addend
);
2781 if (kqemu_is_ok(env
)) {
2782 env
->exception_index
= -1;
2788 void helper_sysenter(void)
2790 if (env
->sysenter_cs
== 0) {
2791 raise_exception_err(EXCP0D_GPF
, 0);
2793 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2794 cpu_x86_set_cpl(env
, 0);
2796 #ifdef TARGET_X86_64
2797 if (env
->hflags
& HF_LMA_MASK
) {
2798 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2800 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2802 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2806 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2808 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2810 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2812 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2814 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2816 DESC_W_MASK
| DESC_A_MASK
);
2817 ESP
= env
->sysenter_esp
;
2818 EIP
= env
->sysenter_eip
;
2821 void helper_sysexit(int dflag
)
2825 cpl
= env
->hflags
& HF_CPL_MASK
;
2826 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2827 raise_exception_err(EXCP0D_GPF
, 0);
2829 cpu_x86_set_cpl(env
, 3);
2830 #ifdef TARGET_X86_64
2832 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) | 3,
2834 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2835 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2836 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2837 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) | 3,
2839 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2840 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2841 DESC_W_MASK
| DESC_A_MASK
);
2845 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2847 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2848 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2849 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2850 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2852 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2853 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2854 DESC_W_MASK
| DESC_A_MASK
);
2859 if (kqemu_is_ok(env
)) {
2860 env
->exception_index
= -1;
2866 #if defined(CONFIG_USER_ONLY)
2867 target_ulong
helper_read_crN(int reg
)
2872 void helper_write_crN(int reg
, target_ulong t0
)
2876 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2880 target_ulong
helper_read_crN(int reg
)
2884 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2890 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2891 val
= cpu_get_apic_tpr(env
);
2900 void helper_write_crN(int reg
, target_ulong t0
)
2902 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2905 cpu_x86_update_cr0(env
, t0
);
2908 cpu_x86_update_cr3(env
, t0
);
2911 cpu_x86_update_cr4(env
, t0
);
2914 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2915 cpu_set_apic_tpr(env
, t0
);
2917 env
->v_tpr
= t0
& 0x0f;
2925 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2930 hw_breakpoint_remove(env
, reg
);
2932 hw_breakpoint_insert(env
, reg
);
2933 } else if (reg
== 7) {
2934 for (i
= 0; i
< 4; i
++)
2935 hw_breakpoint_remove(env
, i
);
2937 for (i
= 0; i
< 4; i
++)
2938 hw_breakpoint_insert(env
, i
);
2944 void helper_lmsw(target_ulong t0
)
2946 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2947 if already set to one. */
2948 t0
= (env
->cr
[0] & ~0xe) | (t0
& 0xf);
2949 helper_write_crN(0, t0
);
2952 void helper_clts(void)
2954 env
->cr
[0] &= ~CR0_TS_MASK
;
2955 env
->hflags
&= ~HF_TS_MASK
;
2958 void helper_invlpg(target_ulong addr
)
2960 helper_svm_check_intercept_param(SVM_EXIT_INVLPG
, 0);
2961 tlb_flush_page(env
, addr
);
2964 void helper_rdtsc(void)
2968 if ((env
->cr
[4] & CR4_TSD_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
2969 raise_exception(EXCP0D_GPF
);
2971 helper_svm_check_intercept_param(SVM_EXIT_RDTSC
, 0);
2973 val
= cpu_get_tsc(env
) + env
->tsc_offset
;
2974 EAX
= (uint32_t)(val
);
2975 EDX
= (uint32_t)(val
>> 32);
2978 void helper_rdpmc(void)
2980 if ((env
->cr
[4] & CR4_PCE_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
2981 raise_exception(EXCP0D_GPF
);
2983 helper_svm_check_intercept_param(SVM_EXIT_RDPMC
, 0);
2985 /* currently unimplemented */
2986 raise_exception_err(EXCP06_ILLOP
, 0);
2989 #if defined(CONFIG_USER_ONLY)
2990 void helper_wrmsr(void)
2994 void helper_rdmsr(void)
2998 void helper_wrmsr(void)
3002 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3004 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3006 switch((uint32_t)ECX
) {
3007 case MSR_IA32_SYSENTER_CS
:
3008 env
->sysenter_cs
= val
& 0xffff;
3010 case MSR_IA32_SYSENTER_ESP
:
3011 env
->sysenter_esp
= val
;
3013 case MSR_IA32_SYSENTER_EIP
:
3014 env
->sysenter_eip
= val
;
3016 case MSR_IA32_APICBASE
:
3017 cpu_set_apic_base(env
, val
);
3021 uint64_t update_mask
;
3023 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3024 update_mask
|= MSR_EFER_SCE
;
3025 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3026 update_mask
|= MSR_EFER_LME
;
3027 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3028 update_mask
|= MSR_EFER_FFXSR
;
3029 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3030 update_mask
|= MSR_EFER_NXE
;
3031 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3032 update_mask
|= MSR_EFER_SVME
;
3033 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3034 update_mask
|= MSR_EFER_FFXSR
;
3035 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3036 (val
& update_mask
));
3045 case MSR_VM_HSAVE_PA
:
3046 env
->vm_hsave
= val
;
3048 #ifdef TARGET_X86_64
3059 env
->segs
[R_FS
].base
= val
;
3062 env
->segs
[R_GS
].base
= val
;
3064 case MSR_KERNELGSBASE
:
3065 env
->kernelgsbase
= val
;
3068 case MSR_MTRRphysBase(0):
3069 case MSR_MTRRphysBase(1):
3070 case MSR_MTRRphysBase(2):
3071 case MSR_MTRRphysBase(3):
3072 case MSR_MTRRphysBase(4):
3073 case MSR_MTRRphysBase(5):
3074 case MSR_MTRRphysBase(6):
3075 case MSR_MTRRphysBase(7):
3076 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3078 case MSR_MTRRphysMask(0):
3079 case MSR_MTRRphysMask(1):
3080 case MSR_MTRRphysMask(2):
3081 case MSR_MTRRphysMask(3):
3082 case MSR_MTRRphysMask(4):
3083 case MSR_MTRRphysMask(5):
3084 case MSR_MTRRphysMask(6):
3085 case MSR_MTRRphysMask(7):
3086 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3088 case MSR_MTRRfix64K_00000
:
3089 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3091 case MSR_MTRRfix16K_80000
:
3092 case MSR_MTRRfix16K_A0000
:
3093 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3095 case MSR_MTRRfix4K_C0000
:
3096 case MSR_MTRRfix4K_C8000
:
3097 case MSR_MTRRfix4K_D0000
:
3098 case MSR_MTRRfix4K_D8000
:
3099 case MSR_MTRRfix4K_E0000
:
3100 case MSR_MTRRfix4K_E8000
:
3101 case MSR_MTRRfix4K_F0000
:
3102 case MSR_MTRRfix4K_F8000
:
3103 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3105 case MSR_MTRRdefType
:
3106 env
->mtrr_deftype
= val
;
3109 /* XXX: exception ? */
3114 void helper_rdmsr(void)
3118 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3120 switch((uint32_t)ECX
) {
3121 case MSR_IA32_SYSENTER_CS
:
3122 val
= env
->sysenter_cs
;
3124 case MSR_IA32_SYSENTER_ESP
:
3125 val
= env
->sysenter_esp
;
3127 case MSR_IA32_SYSENTER_EIP
:
3128 val
= env
->sysenter_eip
;
3130 case MSR_IA32_APICBASE
:
3131 val
= cpu_get_apic_base(env
);
3142 case MSR_VM_HSAVE_PA
:
3143 val
= env
->vm_hsave
;
3145 case MSR_IA32_PERF_STATUS
:
3146 /* tsc_increment_by_tick */
3148 /* CPU multiplier */
3149 val
|= (((uint64_t)4ULL) << 40);
3151 #ifdef TARGET_X86_64
3162 val
= env
->segs
[R_FS
].base
;
3165 val
= env
->segs
[R_GS
].base
;
3167 case MSR_KERNELGSBASE
:
3168 val
= env
->kernelgsbase
;
3172 case MSR_QPI_COMMBASE
:
3173 if (env
->kqemu_enabled
) {
3174 val
= kqemu_comm_base
;
3180 case MSR_MTRRphysBase(0):
3181 case MSR_MTRRphysBase(1):
3182 case MSR_MTRRphysBase(2):
3183 case MSR_MTRRphysBase(3):
3184 case MSR_MTRRphysBase(4):
3185 case MSR_MTRRphysBase(5):
3186 case MSR_MTRRphysBase(6):
3187 case MSR_MTRRphysBase(7):
3188 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3190 case MSR_MTRRphysMask(0):
3191 case MSR_MTRRphysMask(1):
3192 case MSR_MTRRphysMask(2):
3193 case MSR_MTRRphysMask(3):
3194 case MSR_MTRRphysMask(4):
3195 case MSR_MTRRphysMask(5):
3196 case MSR_MTRRphysMask(6):
3197 case MSR_MTRRphysMask(7):
3198 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3200 case MSR_MTRRfix64K_00000
:
3201 val
= env
->mtrr_fixed
[0];
3203 case MSR_MTRRfix16K_80000
:
3204 case MSR_MTRRfix16K_A0000
:
3205 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3207 case MSR_MTRRfix4K_C0000
:
3208 case MSR_MTRRfix4K_C8000
:
3209 case MSR_MTRRfix4K_D0000
:
3210 case MSR_MTRRfix4K_D8000
:
3211 case MSR_MTRRfix4K_E0000
:
3212 case MSR_MTRRfix4K_E8000
:
3213 case MSR_MTRRfix4K_F0000
:
3214 case MSR_MTRRfix4K_F8000
:
3215 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3217 case MSR_MTRRdefType
:
3218 val
= env
->mtrr_deftype
;
3221 if (env
->cpuid_features
& CPUID_MTRR
)
3222 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3224 /* XXX: exception ? */
3228 /* XXX: exception ? */
3232 EAX
= (uint32_t)(val
);
3233 EDX
= (uint32_t)(val
>> 32);
3237 target_ulong
helper_lsl(target_ulong selector1
)
3240 uint32_t e1
, e2
, eflags
, selector
;
3241 int rpl
, dpl
, cpl
, type
;
3243 selector
= selector1
& 0xffff;
3244 eflags
= helper_cc_compute_all(CC_OP
);
3245 if (load_segment(&e1
, &e2
, selector
) != 0)
3248 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3249 cpl
= env
->hflags
& HF_CPL_MASK
;
3250 if (e2
& DESC_S_MASK
) {
3251 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3254 if (dpl
< cpl
|| dpl
< rpl
)
3258 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3269 if (dpl
< cpl
|| dpl
< rpl
) {
3271 CC_SRC
= eflags
& ~CC_Z
;
3275 limit
= get_seg_limit(e1
, e2
);
3276 CC_SRC
= eflags
| CC_Z
;
3280 target_ulong
helper_lar(target_ulong selector1
)
3282 uint32_t e1
, e2
, eflags
, selector
;
3283 int rpl
, dpl
, cpl
, type
;
3285 selector
= selector1
& 0xffff;
3286 eflags
= helper_cc_compute_all(CC_OP
);
3287 if ((selector
& 0xfffc) == 0)
3289 if (load_segment(&e1
, &e2
, selector
) != 0)
3292 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3293 cpl
= env
->hflags
& HF_CPL_MASK
;
3294 if (e2
& DESC_S_MASK
) {
3295 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3298 if (dpl
< cpl
|| dpl
< rpl
)
3302 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3316 if (dpl
< cpl
|| dpl
< rpl
) {
3318 CC_SRC
= eflags
& ~CC_Z
;
3322 CC_SRC
= eflags
| CC_Z
;
3323 return e2
& 0x00f0ff00;
3326 void helper_verr(target_ulong selector1
)
3328 uint32_t e1
, e2
, eflags
, selector
;
3331 selector
= selector1
& 0xffff;
3332 eflags
= helper_cc_compute_all(CC_OP
);
3333 if ((selector
& 0xfffc) == 0)
3335 if (load_segment(&e1
, &e2
, selector
) != 0)
3337 if (!(e2
& DESC_S_MASK
))
3340 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3341 cpl
= env
->hflags
& HF_CPL_MASK
;
3342 if (e2
& DESC_CS_MASK
) {
3343 if (!(e2
& DESC_R_MASK
))
3345 if (!(e2
& DESC_C_MASK
)) {
3346 if (dpl
< cpl
|| dpl
< rpl
)
3350 if (dpl
< cpl
|| dpl
< rpl
) {
3352 CC_SRC
= eflags
& ~CC_Z
;
3356 CC_SRC
= eflags
| CC_Z
;
3359 void helper_verw(target_ulong selector1
)
3361 uint32_t e1
, e2
, eflags
, selector
;
3364 selector
= selector1
& 0xffff;
3365 eflags
= helper_cc_compute_all(CC_OP
);
3366 if ((selector
& 0xfffc) == 0)
3368 if (load_segment(&e1
, &e2
, selector
) != 0)
3370 if (!(e2
& DESC_S_MASK
))
3373 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3374 cpl
= env
->hflags
& HF_CPL_MASK
;
3375 if (e2
& DESC_CS_MASK
) {
3378 if (dpl
< cpl
|| dpl
< rpl
)
3380 if (!(e2
& DESC_W_MASK
)) {
3382 CC_SRC
= eflags
& ~CC_Z
;
3386 CC_SRC
= eflags
| CC_Z
;
3389 /* x87 FPU helpers */
3391 static void fpu_set_exception(int mask
)
3394 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3395 env
->fpus
|= FPUS_SE
| FPUS_B
;
3398 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3401 fpu_set_exception(FPUS_ZE
);
3405 static void fpu_raise_exception(void)
3407 if (env
->cr
[0] & CR0_NE_MASK
) {
3408 raise_exception(EXCP10_COPR
);
3410 #if !defined(CONFIG_USER_ONLY)
3417 void helper_flds_FT0(uint32_t val
)
3424 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3427 void helper_fldl_FT0(uint64_t val
)
3434 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3437 void helper_fildl_FT0(int32_t val
)
3439 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3442 void helper_flds_ST0(uint32_t val
)
3449 new_fpstt
= (env
->fpstt
- 1) & 7;
3451 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3452 env
->fpstt
= new_fpstt
;
3453 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3456 void helper_fldl_ST0(uint64_t val
)
3463 new_fpstt
= (env
->fpstt
- 1) & 7;
3465 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3466 env
->fpstt
= new_fpstt
;
3467 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3470 void helper_fildl_ST0(int32_t val
)
3473 new_fpstt
= (env
->fpstt
- 1) & 7;
3474 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3475 env
->fpstt
= new_fpstt
;
3476 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3479 void helper_fildll_ST0(int64_t val
)
3482 new_fpstt
= (env
->fpstt
- 1) & 7;
3483 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3484 env
->fpstt
= new_fpstt
;
3485 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3488 uint32_t helper_fsts_ST0(void)
3494 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3498 uint64_t helper_fstl_ST0(void)
3504 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3508 int32_t helper_fist_ST0(void)
3511 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3512 if (val
!= (int16_t)val
)
3517 int32_t helper_fistl_ST0(void)
3520 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3524 int64_t helper_fistll_ST0(void)
3527 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3531 int32_t helper_fistt_ST0(void)
3534 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3535 if (val
!= (int16_t)val
)
3540 int32_t helper_fisttl_ST0(void)
3543 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3547 int64_t helper_fisttll_ST0(void)
3550 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3554 void helper_fldt_ST0(target_ulong ptr
)
3557 new_fpstt
= (env
->fpstt
- 1) & 7;
3558 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3559 env
->fpstt
= new_fpstt
;
3560 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3563 void helper_fstt_ST0(target_ulong ptr
)
3565 helper_fstt(ST0
, ptr
);
3568 void helper_fpush(void)
3573 void helper_fpop(void)
3578 void helper_fdecstp(void)
3580 env
->fpstt
= (env
->fpstt
- 1) & 7;
3581 env
->fpus
&= (~0x4700);
3584 void helper_fincstp(void)
3586 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3587 env
->fpus
&= (~0x4700);
3592 void helper_ffree_STN(int st_index
)
3594 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3597 void helper_fmov_ST0_FT0(void)
3602 void helper_fmov_FT0_STN(int st_index
)
3607 void helper_fmov_ST0_STN(int st_index
)
3612 void helper_fmov_STN_ST0(int st_index
)
3617 void helper_fxchg_ST0_STN(int st_index
)
3625 /* FPU operations */
3627 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3629 void helper_fcom_ST0_FT0(void)
3633 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3634 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3637 void helper_fucom_ST0_FT0(void)
3641 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3642 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3645 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3647 void helper_fcomi_ST0_FT0(void)
3652 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3653 eflags
= helper_cc_compute_all(CC_OP
);
3654 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3658 void helper_fucomi_ST0_FT0(void)
3663 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3664 eflags
= helper_cc_compute_all(CC_OP
);
3665 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3669 void helper_fadd_ST0_FT0(void)
3674 void helper_fmul_ST0_FT0(void)
3679 void helper_fsub_ST0_FT0(void)
3684 void helper_fsubr_ST0_FT0(void)
3689 void helper_fdiv_ST0_FT0(void)
3691 ST0
= helper_fdiv(ST0
, FT0
);
3694 void helper_fdivr_ST0_FT0(void)
3696 ST0
= helper_fdiv(FT0
, ST0
);
3699 /* fp operations between STN and ST0 */
3701 void helper_fadd_STN_ST0(int st_index
)
3703 ST(st_index
) += ST0
;
3706 void helper_fmul_STN_ST0(int st_index
)
3708 ST(st_index
) *= ST0
;
3711 void helper_fsub_STN_ST0(int st_index
)
3713 ST(st_index
) -= ST0
;
3716 void helper_fsubr_STN_ST0(int st_index
)
3723 void helper_fdiv_STN_ST0(int st_index
)
3727 *p
= helper_fdiv(*p
, ST0
);
3730 void helper_fdivr_STN_ST0(int st_index
)
3734 *p
= helper_fdiv(ST0
, *p
);
3737 /* misc FPU operations */
3738 void helper_fchs_ST0(void)
3740 ST0
= floatx_chs(ST0
);
3743 void helper_fabs_ST0(void)
3745 ST0
= floatx_abs(ST0
);
3748 void helper_fld1_ST0(void)
3753 void helper_fldl2t_ST0(void)
3758 void helper_fldl2e_ST0(void)
3763 void helper_fldpi_ST0(void)
3768 void helper_fldlg2_ST0(void)
3773 void helper_fldln2_ST0(void)
3778 void helper_fldz_ST0(void)
3783 void helper_fldz_FT0(void)
3788 uint32_t helper_fnstsw(void)
3790 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3793 uint32_t helper_fnstcw(void)
3798 static void update_fp_status(void)
3802 /* set rounding mode */
3803 switch(env
->fpuc
& RC_MASK
) {
3806 rnd_type
= float_round_nearest_even
;
3809 rnd_type
= float_round_down
;
3812 rnd_type
= float_round_up
;
3815 rnd_type
= float_round_to_zero
;
3818 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3820 switch((env
->fpuc
>> 8) & 3) {
3832 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3836 void helper_fldcw(uint32_t val
)
3842 void helper_fclex(void)
3844 env
->fpus
&= 0x7f00;
3847 void helper_fwait(void)
3849 if (env
->fpus
& FPUS_SE
)
3850 fpu_raise_exception();
3853 void helper_fninit(void)
3870 void helper_fbld_ST0(target_ulong ptr
)
3878 for(i
= 8; i
>= 0; i
--) {
3880 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3883 if (ldub(ptr
+ 9) & 0x80)
3889 void helper_fbst_ST0(target_ulong ptr
)
3892 target_ulong mem_ref
, mem_end
;
3895 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3897 mem_end
= mem_ref
+ 9;
3904 while (mem_ref
< mem_end
) {
3909 v
= ((v
/ 10) << 4) | (v
% 10);
3912 while (mem_ref
< mem_end
) {
3917 void helper_f2xm1(void)
3919 ST0
= pow(2.0,ST0
) - 1.0;
3922 void helper_fyl2x(void)
3924 CPU86_LDouble fptemp
;
3928 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3932 env
->fpus
&= (~0x4700);
3937 void helper_fptan(void)
3939 CPU86_LDouble fptemp
;
3942 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3948 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3949 /* the above code is for |arg| < 2**52 only */
3953 void helper_fpatan(void)
3955 CPU86_LDouble fptemp
, fpsrcop
;
3959 ST1
= atan2(fpsrcop
,fptemp
);
3963 void helper_fxtract(void)
3965 CPU86_LDoubleU temp
;
3966 unsigned int expdif
;
3969 expdif
= EXPD(temp
) - EXPBIAS
;
3970 /*DP exponent bias*/
3977 void helper_fprem1(void)
3979 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3980 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3982 signed long long int q
;
3984 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3985 ST0
= 0.0 / 0.0; /* NaN */
3986 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3992 fpsrcop1
.d
= fpsrcop
;
3994 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3997 /* optimisation? taken from the AMD docs */
3998 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3999 /* ST0 is unchanged */
4004 dblq
= fpsrcop
/ fptemp
;
4005 /* round dblq towards nearest integer */
4007 ST0
= fpsrcop
- fptemp
* dblq
;
4009 /* convert dblq to q by truncating towards zero */
4011 q
= (signed long long int)(-dblq
);
4013 q
= (signed long long int)dblq
;
4015 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4016 /* (C0,C3,C1) <-- (q2,q1,q0) */
4017 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4018 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4019 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4021 env
->fpus
|= 0x400; /* C2 <-- 1 */
4022 fptemp
= pow(2.0, expdif
- 50);
4023 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4024 /* fpsrcop = integer obtained by chopping */
4025 fpsrcop
= (fpsrcop
< 0.0) ?
4026 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4027 ST0
-= (ST1
* fpsrcop
* fptemp
);
4031 void helper_fprem(void)
4033 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
4034 CPU86_LDoubleU fpsrcop1
, fptemp1
;
4036 signed long long int q
;
4038 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
4039 ST0
= 0.0 / 0.0; /* NaN */
4040 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4044 fpsrcop
= (CPU86_LDouble
)ST0
;
4045 fptemp
= (CPU86_LDouble
)ST1
;
4046 fpsrcop1
.d
= fpsrcop
;
4048 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4051 /* optimisation? taken from the AMD docs */
4052 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4053 /* ST0 is unchanged */
4057 if ( expdif
< 53 ) {
4058 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4059 /* round dblq towards zero */
4060 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4061 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4063 /* convert dblq to q by truncating towards zero */
4065 q
= (signed long long int)(-dblq
);
4067 q
= (signed long long int)dblq
;
4069 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4070 /* (C0,C3,C1) <-- (q2,q1,q0) */
4071 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4072 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4073 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4075 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4076 env
->fpus
|= 0x400; /* C2 <-- 1 */
4077 fptemp
= pow(2.0, (double)(expdif
- N
));
4078 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4079 /* fpsrcop = integer obtained by chopping */
4080 fpsrcop
= (fpsrcop
< 0.0) ?
4081 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4082 ST0
-= (ST1
* fpsrcop
* fptemp
);
4086 void helper_fyl2xp1(void)
4088 CPU86_LDouble fptemp
;
4091 if ((fptemp
+1.0)>0.0) {
4092 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4096 env
->fpus
&= (~0x4700);
4101 void helper_fsqrt(void)
4103 CPU86_LDouble fptemp
;
4107 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4113 void helper_fsincos(void)
4115 CPU86_LDouble fptemp
;
4118 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4124 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4125 /* the above code is for |arg| < 2**63 only */
4129 void helper_frndint(void)
4131 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4134 void helper_fscale(void)
4136 ST0
= ldexp (ST0
, (int)(ST1
));
4139 void helper_fsin(void)
4141 CPU86_LDouble fptemp
;
4144 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4148 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4149 /* the above code is for |arg| < 2**53 only */
4153 void helper_fcos(void)
4155 CPU86_LDouble fptemp
;
4158 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4162 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4163 /* the above code is for |arg5 < 2**63 only */
4167 void helper_fxam_ST0(void)
4169 CPU86_LDoubleU temp
;
4174 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4176 env
->fpus
|= 0x200; /* C1 <-- 1 */
4178 /* XXX: test fptags too */
4179 expdif
= EXPD(temp
);
4180 if (expdif
== MAXEXPD
) {
4181 #ifdef USE_X86LDOUBLE
4182 if (MANTD(temp
) == 0x8000000000000000ULL
)
4184 if (MANTD(temp
) == 0)
4186 env
->fpus
|= 0x500 /*Infinity*/;
4188 env
->fpus
|= 0x100 /*NaN*/;
4189 } else if (expdif
== 0) {
4190 if (MANTD(temp
) == 0)
4191 env
->fpus
|= 0x4000 /*Zero*/;
4193 env
->fpus
|= 0x4400 /*Denormal*/;
4199 void helper_fstenv(target_ulong ptr
, int data32
)
4201 int fpus
, fptag
, exp
, i
;
4205 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4207 for (i
=7; i
>=0; i
--) {
4209 if (env
->fptags
[i
]) {
4212 tmp
.d
= env
->fpregs
[i
].d
;
4215 if (exp
== 0 && mant
== 0) {
4218 } else if (exp
== 0 || exp
== MAXEXPD
4219 #ifdef USE_X86LDOUBLE
4220 || (mant
& (1LL << 63)) == 0
4223 /* NaNs, infinity, denormal */
4230 stl(ptr
, env
->fpuc
);
4232 stl(ptr
+ 8, fptag
);
4233 stl(ptr
+ 12, 0); /* fpip */
4234 stl(ptr
+ 16, 0); /* fpcs */
4235 stl(ptr
+ 20, 0); /* fpoo */
4236 stl(ptr
+ 24, 0); /* fpos */
4239 stw(ptr
, env
->fpuc
);
4241 stw(ptr
+ 4, fptag
);
4249 void helper_fldenv(target_ulong ptr
, int data32
)
4254 env
->fpuc
= lduw(ptr
);
4255 fpus
= lduw(ptr
+ 4);
4256 fptag
= lduw(ptr
+ 8);
4259 env
->fpuc
= lduw(ptr
);
4260 fpus
= lduw(ptr
+ 2);
4261 fptag
= lduw(ptr
+ 4);
4263 env
->fpstt
= (fpus
>> 11) & 7;
4264 env
->fpus
= fpus
& ~0x3800;
4265 for(i
= 0;i
< 8; i
++) {
4266 env
->fptags
[i
] = ((fptag
& 3) == 3);
4271 void helper_fsave(target_ulong ptr
, int data32
)
4276 helper_fstenv(ptr
, data32
);
4278 ptr
+= (14 << data32
);
4279 for(i
= 0;i
< 8; i
++) {
4281 helper_fstt(tmp
, ptr
);
4299 void helper_frstor(target_ulong ptr
, int data32
)
4304 helper_fldenv(ptr
, data32
);
4305 ptr
+= (14 << data32
);
4307 for(i
= 0;i
< 8; i
++) {
4308 tmp
= helper_fldt(ptr
);
4314 void helper_fxsave(target_ulong ptr
, int data64
)
4316 int fpus
, fptag
, i
, nb_xmm_regs
;
4320 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4322 for(i
= 0; i
< 8; i
++) {
4323 fptag
|= (env
->fptags
[i
] << i
);
4325 stw(ptr
, env
->fpuc
);
4327 stw(ptr
+ 4, fptag
^ 0xff);
4328 #ifdef TARGET_X86_64
4330 stq(ptr
+ 0x08, 0); /* rip */
4331 stq(ptr
+ 0x10, 0); /* rdp */
4335 stl(ptr
+ 0x08, 0); /* eip */
4336 stl(ptr
+ 0x0c, 0); /* sel */
4337 stl(ptr
+ 0x10, 0); /* dp */
4338 stl(ptr
+ 0x14, 0); /* sel */
4342 for(i
= 0;i
< 8; i
++) {
4344 helper_fstt(tmp
, addr
);
4348 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4349 /* XXX: finish it */
4350 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4351 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4352 if (env
->hflags
& HF_CS64_MASK
)
4357 /* Fast FXSAVE leaves out the XMM registers */
4358 if (!(env
->efer
& MSR_EFER_FFXSR
)
4359 || (env
->hflags
& HF_CPL_MASK
)
4360 || !(env
->hflags
& HF_LMA_MASK
)) {
4361 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4362 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4363 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4370 void helper_fxrstor(target_ulong ptr
, int data64
)
4372 int i
, fpus
, fptag
, nb_xmm_regs
;
4376 env
->fpuc
= lduw(ptr
);
4377 fpus
= lduw(ptr
+ 2);
4378 fptag
= lduw(ptr
+ 4);
4379 env
->fpstt
= (fpus
>> 11) & 7;
4380 env
->fpus
= fpus
& ~0x3800;
4382 for(i
= 0;i
< 8; i
++) {
4383 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4387 for(i
= 0;i
< 8; i
++) {
4388 tmp
= helper_fldt(addr
);
4393 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4394 /* XXX: finish it */
4395 env
->mxcsr
= ldl(ptr
+ 0x18);
4397 if (env
->hflags
& HF_CS64_MASK
)
4402 /* Fast FXRESTORE leaves out the XMM registers */
4403 if (!(env
->efer
& MSR_EFER_FFXSR
)
4404 || (env
->hflags
& HF_CPL_MASK
)
4405 || !(env
->hflags
& HF_LMA_MASK
)) {
4406 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4407 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4408 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4415 #ifndef USE_X86LDOUBLE
4417 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4419 CPU86_LDoubleU temp
;
4424 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4425 /* exponent + sign */
4426 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4427 e
|= SIGND(temp
) >> 16;
4431 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4433 CPU86_LDoubleU temp
;
4437 /* XXX: handle overflow ? */
4438 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4439 e
|= (upper
>> 4) & 0x800; /* sign */
4440 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4442 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4445 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4452 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4454 CPU86_LDoubleU temp
;
4457 *pmant
= temp
.l
.lower
;
4458 *pexp
= temp
.l
.upper
;
4461 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4463 CPU86_LDoubleU temp
;
4465 temp
.l
.upper
= upper
;
4466 temp
.l
.lower
= mant
;
4471 #ifdef TARGET_X86_64
4473 //#define DEBUG_MULDIV
4475 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4484 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4488 add128(plow
, phigh
, 1, 0);
4491 /* return TRUE if overflow */
4492 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4494 uint64_t q
, r
, a1
, a0
;
4507 /* XXX: use a better algorithm */
4508 for(i
= 0; i
< 64; i
++) {
4510 a1
= (a1
<< 1) | (a0
>> 63);
4511 if (ab
|| a1
>= b
) {
4517 a0
= (a0
<< 1) | qb
;
4519 #if defined(DEBUG_MULDIV)
4520 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4521 *phigh
, *plow
, b
, a0
, a1
);
4529 /* return TRUE if overflow */
4530 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4533 sa
= ((int64_t)*phigh
< 0);
4535 neg128(plow
, phigh
);
4539 if (div64(plow
, phigh
, b
) != 0)
4542 if (*plow
> (1ULL << 63))
4546 if (*plow
>= (1ULL << 63))
4554 void helper_mulq_EAX_T0(target_ulong t0
)
4558 mulu64(&r0
, &r1
, EAX
, t0
);
4565 void helper_imulq_EAX_T0(target_ulong t0
)
4569 muls64(&r0
, &r1
, EAX
, t0
);
4573 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4576 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4580 muls64(&r0
, &r1
, t0
, t1
);
4582 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4586 void helper_divq_EAX(target_ulong t0
)
4590 raise_exception(EXCP00_DIVZ
);
4594 if (div64(&r0
, &r1
, t0
))
4595 raise_exception(EXCP00_DIVZ
);
4600 void helper_idivq_EAX(target_ulong t0
)
4604 raise_exception(EXCP00_DIVZ
);
4608 if (idiv64(&r0
, &r1
, t0
))
4609 raise_exception(EXCP00_DIVZ
);
4615 static void do_hlt(void)
4617 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4619 env
->exception_index
= EXCP_HLT
;
4623 void helper_hlt(int next_eip_addend
)
4625 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4626 EIP
+= next_eip_addend
;
4631 void helper_monitor(target_ulong ptr
)
4633 if ((uint32_t)ECX
!= 0)
4634 raise_exception(EXCP0D_GPF
);
4635 /* XXX: store address ? */
4636 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4639 void helper_mwait(int next_eip_addend
)
4641 if ((uint32_t)ECX
!= 0)
4642 raise_exception(EXCP0D_GPF
);
4643 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4644 EIP
+= next_eip_addend
;
4646 /* XXX: not complete but not completely erroneous */
4647 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4648 /* more than one CPU: do not sleep because another CPU may
4655 void helper_debug(void)
4657 env
->exception_index
= EXCP_DEBUG
;
4661 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4663 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4666 void helper_raise_exception(int exception_index
)
4668 raise_exception(exception_index
);
4671 void helper_cli(void)
4673 env
->eflags
&= ~IF_MASK
;
4676 void helper_sti(void)
4678 env
->eflags
|= IF_MASK
;
4682 /* vm86plus instructions */
4683 void helper_cli_vm(void)
4685 env
->eflags
&= ~VIF_MASK
;
4688 void helper_sti_vm(void)
4690 env
->eflags
|= VIF_MASK
;
4691 if (env
->eflags
& VIP_MASK
) {
4692 raise_exception(EXCP0D_GPF
);
4697 void helper_set_inhibit_irq(void)
4699 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4702 void helper_reset_inhibit_irq(void)
4704 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4707 void helper_boundw(target_ulong a0
, int v
)
4711 high
= ldsw(a0
+ 2);
4713 if (v
< low
|| v
> high
) {
4714 raise_exception(EXCP05_BOUND
);
4718 void helper_boundl(target_ulong a0
, int v
)
4723 if (v
< low
|| v
> high
) {
4724 raise_exception(EXCP05_BOUND
);
4728 static float approx_rsqrt(float a
)
4730 return 1.0 / sqrt(a
);
4733 static float approx_rcp(float a
)
4738 #if !defined(CONFIG_USER_ONLY)
4740 #define MMUSUFFIX _mmu
4743 #include "softmmu_template.h"
4746 #include "softmmu_template.h"
4749 #include "softmmu_template.h"
4752 #include "softmmu_template.h"
4756 #if !defined(CONFIG_USER_ONLY)
4757 /* try to fill the TLB and return an exception if error. If retaddr is
4758 NULL, it means that the function was called in C code (i.e. not
4759 from generated code or from helper.c) */
4760 /* XXX: fix it to restore all registers */
4761 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4763 TranslationBlock
*tb
;
4766 CPUX86State
*saved_env
;
4768 /* XXX: hack to restore env in all cases, even if not called from
4771 env
= cpu_single_env
;
4773 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4776 /* now we have a real cpu fault */
4777 pc
= (unsigned long)retaddr
;
4778 tb
= tb_find_pc(pc
);
4780 /* the PC is inside the translated code. It means that we have
4781 a virtual CPU fault */
4782 cpu_restore_state(tb
, env
, pc
, NULL
);
4785 raise_exception_err(env
->exception_index
, env
->error_code
);
4791 /* Secure Virtual Machine helpers */
4793 #if defined(CONFIG_USER_ONLY)
4795 void helper_vmrun(int aflag
, int next_eip_addend
)
4798 void helper_vmmcall(void)
4801 void helper_vmload(int aflag
)
4804 void helper_vmsave(int aflag
)
4807 void helper_stgi(void)
4810 void helper_clgi(void)
4813 void helper_skinit(void)
4816 void helper_invlpga(int aflag
)
4819 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4822 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4826 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4827 uint32_t next_eip_addend
)
4832 static inline void svm_save_seg(target_phys_addr_t addr
,
4833 const SegmentCache
*sc
)
4835 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4837 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4839 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4841 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4842 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
4845 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4849 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4850 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4851 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4852 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4853 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4856 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4857 CPUState
*env
, int seg_reg
)
4859 SegmentCache sc1
, *sc
= &sc1
;
4860 svm_load_seg(addr
, sc
);
4861 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4862 sc
->base
, sc
->limit
, sc
->flags
);
4865 void helper_vmrun(int aflag
, int next_eip_addend
)
4871 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4876 addr
= (uint32_t)EAX
;
4878 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4880 env
->vm_vmcb
= addr
;
4882 /* save the current CPU state in the hsave page */
4883 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4884 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4886 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4887 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4889 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4890 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4891 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4892 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4893 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4894 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4896 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4897 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4899 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4901 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4903 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4905 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4908 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4909 EIP
+ next_eip_addend
);
4910 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4911 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4913 /* load the interception bitmaps so we do not need to access the
4915 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4916 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4917 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4918 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4919 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4920 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4922 /* enable intercepts */
4923 env
->hflags
|= HF_SVMI_MASK
;
4925 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4927 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4928 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4930 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4931 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4933 /* clear exit_info_2 so we behave like the real hardware */
4934 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4936 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4937 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4938 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4939 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4940 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4941 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
4942 if (int_ctl
& V_INTR_MASKING_MASK
) {
4943 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
4944 env
->hflags2
|= HF2_VINTR_MASK
;
4945 if (env
->eflags
& IF_MASK
)
4946 env
->hflags2
|= HF2_HIF_MASK
;
4950 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
4952 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4953 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4954 CC_OP
= CC_OP_EFLAGS
;
4956 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
4958 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
4960 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
4962 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
4965 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4967 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4968 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4969 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4970 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4971 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4973 /* FIXME: guest state consistency checks */
4975 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4976 case TLB_CONTROL_DO_NOTHING
:
4978 case TLB_CONTROL_FLUSH_ALL_ASID
:
4979 /* FIXME: this is not 100% correct but should work for now */
4984 env
->hflags2
|= HF2_GIF_MASK
;
4986 if (int_ctl
& V_IRQ_MASK
) {
4987 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4990 /* maybe we need to inject an event */
4991 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4992 if (event_inj
& SVM_EVTINJ_VALID
) {
4993 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4994 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4995 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4996 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4998 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
4999 /* FIXME: need to implement valid_err */
5000 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
5001 case SVM_EVTINJ_TYPE_INTR
:
5002 env
->exception_index
= vector
;
5003 env
->error_code
= event_inj_err
;
5004 env
->exception_is_int
= 0;
5005 env
->exception_next_eip
= -1;
5006 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
5007 /* XXX: is it always correct ? */
5008 do_interrupt(vector
, 0, 0, 0, 1);
5010 case SVM_EVTINJ_TYPE_NMI
:
5011 env
->exception_index
= EXCP02_NMI
;
5012 env
->error_code
= event_inj_err
;
5013 env
->exception_is_int
= 0;
5014 env
->exception_next_eip
= EIP
;
5015 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
5018 case SVM_EVTINJ_TYPE_EXEPT
:
5019 env
->exception_index
= vector
;
5020 env
->error_code
= event_inj_err
;
5021 env
->exception_is_int
= 0;
5022 env
->exception_next_eip
= -1;
5023 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
5026 case SVM_EVTINJ_TYPE_SOFT
:
5027 env
->exception_index
= vector
;
5028 env
->error_code
= event_inj_err
;
5029 env
->exception_is_int
= 1;
5030 env
->exception_next_eip
= EIP
;
5031 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
5035 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
5039 void helper_vmmcall(void)
5041 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5042 raise_exception(EXCP06_ILLOP
);
5045 void helper_vmload(int aflag
)
5048 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
5053 addr
= (uint32_t)EAX
;
5055 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5056 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5057 env
->segs
[R_FS
].base
);
5059 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
5061 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
5063 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5065 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5068 #ifdef TARGET_X86_64
5069 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
5070 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
5071 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
5072 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
5074 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
5075 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
5076 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
5077 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
5080 void helper_vmsave(int aflag
)
5083 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE
, 0);
5088 addr
= (uint32_t)EAX
;
5090 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5091 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5092 env
->segs
[R_FS
].base
);
5094 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.fs
),
5096 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.gs
),
5098 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5100 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5103 #ifdef TARGET_X86_64
5104 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
5105 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
5106 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
5107 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
5109 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
5110 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
5111 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
5112 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5115 void helper_stgi(void)
5117 helper_svm_check_intercept_param(SVM_EXIT_STGI
, 0);
5118 env
->hflags2
|= HF2_GIF_MASK
;
5121 void helper_clgi(void)
5123 helper_svm_check_intercept_param(SVM_EXIT_CLGI
, 0);
5124 env
->hflags2
&= ~HF2_GIF_MASK
;
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
void helper_invlpga(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = ECX * 2;
                t1 = t0 / 8;
                t0 %= 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
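
/* Illustrative note (not from the original source): env->fptags[] holds one
   byte per x87 register, 0 meaning valid and 1 meaning empty.
   helper_enter_mmx() clears all eight tags with two 32-bit stores because
   MMX code makes every register valid, while helper_emms() writes 0x01010101
   twice to mark them all empty again.  A byte-wise equivalent, shown only for
   clarity (set_fptags_sketch() is a hypothetical name), would be: */
static inline void set_fptags_sketch(uint8_t *fptags, uint8_t value)
{
    int i;

    for (i = 0; i < 8; i++)
        fptags[i] = value;   /* value = 0 after ENTER_MMX, 1 after EMMS */
}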
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
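
/* Illustrative note (not from the original source): ops_sse.h and
   helper_template.h are written as templates and included once per variant.
   For ops_sse.h, SHIFT selects MMX (0) versus SSE (1) register width; for
   helper_template.h it selects the operand size (0 = byte, 1 = word,
   2 = long, 3 = quad under TARGET_X86_64), and each inclusion expands the
   same bodies into size-suffixed helpers via token pasting.  A minimal
   sketch of the idiom, using hypothetical SKETCH_* names rather than the
   real template macros, would be: */
#define SKETCH_XGLUE(a, b) a ## b
#define SKETCH_GLUE(a, b) SKETCH_XGLUE(a, b)   /* expand the suffix before pasting */

#define SKETCH_SUFFIX w
#define SKETCH_TYPE uint16_t
static inline SKETCH_TYPE SKETCH_GLUE(sketch_not, SKETCH_SUFFIX)(SKETCH_TYPE x)
{
    return (SKETCH_TYPE)~x;     /* this pass produces sketch_notw() */
}
#undef SKETCH_TYPE
#undef SKETCH_SUFFIX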
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
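
/* Illustrative note (not from the original source): these helpers implement
   BSF/BSR by shifting until the first set bit is found, so they must only be
   reached with t0 != 0; for a zero source the loops would not terminate, and
   the translated code is expected to handle that case separately, matching
   the x86 rule that BSF/BSR leave the destination undefined when the source
   is zero.  A quick self-check of the convention (hypothetical helper, not
   used by the emulator): */
static inline int sketch_check_bit_scan(void)
{
    /* 0x28 == 0b101000: lowest set bit is 3, highest is 5 */
    return helper_bsf(0x28) == 3 && helper_bsr(0x28) == 5;
}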
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
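
/* Illustrative note (not from the original source): the dispatchers below
   reconstruct EFLAGS lazily from CC_OP (the last flag-setting operation
   kind), CC_SRC and CC_DST instead of computing flags after every
   instruction; CC_OP_EFLAGS means the flags are already materialized in
   CC_SRC, which is why compute_all_eflags() just returns it.  As a sketch of
   what one generated per-width variant does, a 32-bit ADD (CC_SRC = one
   operand, CC_DST = result) could rebuild the flags like this; the function
   name is hypothetical and it reuses the parity_table defined earlier in
   this file: */
static inline uint32_t sketch_compute_all_add32(uint32_t src1, uint32_t dst)
{
    uint32_t src2 = dst - src1;                        /* dst = src1 + src2 */
    uint32_t cf = dst < src1;                          /* unsigned carry out */
    uint32_t pf = parity_table[(uint8_t)dst];          /* parity of the low byte */
    uint32_t af = (dst ^ src1 ^ src2) & CC_A;          /* BCD carry out of bit 3 */
    uint32_t zf = (dst == 0) << 6;
    uint32_t sf = (dst >> 24) & CC_S;                  /* sign bit 31 -> bit 7 */
    uint32_t of = ((~(src1 ^ src2) & (src1 ^ dst)) >> 20) & CC_O; /* signed overflow -> bit 11 */

    return cf | pf | af | zf | sf | of;
}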
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}