/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
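/*
 * Note (not from the original source): parity_table[v] is CC_P exactly when
 * the low 8 bits of v contain an even number of set bits, which is how the
 * x86 PF flag is defined.  A minimal sketch of how an equivalent table could
 * be generated at startup, assuming the ctpop8() helper from the
 * "host-utils.h" header included above:
 *
 *     for (i = 0; i < 256; i++)
 *         table[i] = (ctpop8(i) & 1) ? 0 : CC_P;
 */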
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
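/*
 * Note: these are rotate-count lookup tables for RCL.  RCL rotates through
 * CF, so the effective width is one bit wider than the operand: a 16 bit
 * RCL uses the count modulo 17 (rclw_table) and an 8 bit RCL uses it modulo
 * 9 (rclb_table), e.g. rclb_table[12] == 12 % 9 == 3.
 */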
#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;

    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set the busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    /* load the LDT */
    env->ldt.selector = new_ldt & ~4;
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
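/*
 * Worked example for the bitmap check above (illustration only): the word at
 * TSS offset 0x66 is the base of the I/O permission bitmap.  Each port owns
 * one bit (byte addr >> 3, bit addr & 7) and a set bit denies access, so an
 * N byte access is allowed only if all N consecutive bits are clear.
 * Reading 16 bits at once is what lets a single check straddle a byte
 * boundary, e.g. an outw to port 0x3ff needs bit 7 of bitmap byte 0x7f and
 * bit 0 of byte 0x80.
 */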
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
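/*
 * Usage sketch for the macros above (an assumption, mirroring the callers
 * below, not additional original code): the caller works on a local copy of
 * the stack pointer and only commits it once every push has succeeded, so a
 * fault in the middle of building a frame leaves ESP untouched:
 *
 *     esp = ESP;
 *     ssp = env->segs[R_SS].base;
 *     sp_mask = get_sp_mask(env->segs[R_SS].flags);
 *     PUSHL(ssp, esp, sp_mask, error_code);
 *     SET_ESP(esp, sp_mask);
 */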
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
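/*
 * Note: in the 64 bit TSS, RSP0/RSP1/RSP2 live at offsets 4/12/20 and
 * IST1..IST7 start at offset 36, so index = 8 * level + 4 maps level 0-2 to
 * the privilege-level stacks and level ist + 3 (as passed by the caller
 * below) to the corresponding IST slot.
 */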
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
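/*
 * Note on the MSRs used above: in long mode SYSCALL takes the new CS from
 * STAR[47:32] (SS is that value + 8), saves the return RIP in RCX and RFLAGS
 * in R11, masks RFLAGS with SFMASK (env->fmask) and jumps to LSTAR (or CSTAR
 * for a 32 bit caller); in legacy mode the target EIP comes from STAR[31:0].
 */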
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
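/*
 * Note: a real mode "IDT" entry is just a 4 byte IVT slot at intno * 4, a 16
 * bit offset followed by a 16 bit segment; only FLAGS, CS and IP are pushed
 * and IF/TF are cleared before entering the handler, which is what the code
 * above implements.
 */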
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
}
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
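/*
 * Note: the classification above follows the x86 double-fault rules:
 * exceptions 0 and 10-13 are "contributory"; a contributory fault raised
 * while delivering a contributory fault, or any contributory/page fault
 * raised while delivering a page fault, escalates to #DF (vector 8), and a
 * fault while delivering #DF is a triple fault, handled above as an SVM
 * shutdown or a system reset.
 */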
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
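/*
 * Note: AAM divides AL by the immediate base (10 for the plain opcode),
 * putting the quotient in AH and the remainder in AL, e.g. AL=53 gives AH=5,
 * AL=3; AAD folds AH*base+AL back into AL and clears AH.  The XXX above
 * presumably refers to the #DE that hardware raises when the AAM immediate
 * is zero, which is not checked here.
 */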
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_into(int next_eip_addend)
{
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
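/*
 * Note: CMPXCHG8B compares EDX:EAX with the 64 bit memory operand; on a
 * match it stores ECX:EBX and sets ZF, otherwise it loads the old value into
 * EDX:EAX and clears ZF.  The store is performed on both paths ("always do
 * the store") so the memory access and fault behaviour match hardware.
 * CMPXCHG16B additionally #GPs on a non-16-byte-aligned operand, hence the
 * (a0 & 0xf) check above.
 */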
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit: copy the enclosing frame pointers, then push t1 */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;
    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        stw(esp, t1);
    }
}
#endif
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
2306 /* protected mode call */
2307 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2308 int shift
, int next_eip_addend
)
2311 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2312 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
2313 uint32_t val
, limit
, old_sp_mask
;
2314 target_ulong ssp
, old_ssp
, next_eip
;
2316 next_eip
= env
->eip
+ next_eip_addend
;
2317 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
2318 LOG_PCALL_STATE(env
);
2319 if ((new_cs
& 0xfffc) == 0)
2320 raise_exception_err(EXCP0D_GPF
, 0);
2321 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2322 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2323 cpl
= env
->hflags
& HF_CPL_MASK
;
2324 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
2325 if (e2
& DESC_S_MASK
) {
2326 if (!(e2
& DESC_CS_MASK
))
2327 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2328 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2329 if (e2
& DESC_C_MASK
) {
2330 /* conforming code segment */
2332 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2334 /* non conforming code segment */
2337 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2339 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2341 if (!(e2
& DESC_P_MASK
))
2342 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2344 #ifdef TARGET_X86_64
2345 /* XXX: check 16/32 bit cases in long mode */
2350 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2351 PUSHQ(rsp
, next_eip
);
2352 /* from this point, not restartable */
2354 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2355 get_seg_base(e1
, e2
),
2356 get_seg_limit(e1
, e2
), e2
);
2362 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2363 ssp
= env
->segs
[R_SS
].base
;
2365 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2366 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2368 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2369 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2372 limit
= get_seg_limit(e1
, e2
);
2373 if (new_eip
> limit
)
2374 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2375 /* from this point, not restartable */
2376 SET_ESP(sp
, sp_mask
);
2377 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2378 get_seg_base(e1
, e2
), limit
, e2
);
2382 /* check gate type */
2383 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2384 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2387 case 1: /* available 286 TSS */
2388 case 9: /* available 386 TSS */
2389 case 5: /* task gate */
2390 if (dpl
< cpl
|| dpl
< rpl
)
2391 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2392 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2393 CC_OP
= CC_OP_EFLAGS
;
2395 case 4: /* 286 call gate */
2396 case 12: /* 386 call gate */
2399 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2404 if (dpl
< cpl
|| dpl
< rpl
)
2405 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2406 /* check valid bit */
2407 if (!(e2
& DESC_P_MASK
))
2408 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2409 selector
= e1
>> 16;
2410 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2411 param_count
= e2
& 0x1f;
2412 if ((selector
& 0xfffc) == 0)
2413 raise_exception_err(EXCP0D_GPF
, 0);
2415 if (load_segment(&e1
, &e2
, selector
) != 0)
2416 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2417 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2418 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2419 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2421 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2422 if (!(e2
& DESC_P_MASK
))
2423 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2425 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2426 /* to inner privilege */
2427 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2428 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2429 ss
, sp
, param_count
, ESP
);
2430 if ((ss
& 0xfffc) == 0)
2431 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2432 if ((ss
& 3) != dpl
)
2433 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2434 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2435 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2436 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2438 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2439 if (!(ss_e2
& DESC_S_MASK
) ||
2440 (ss_e2
& DESC_CS_MASK
) ||
2441 !(ss_e2
& DESC_W_MASK
))
2442 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2443 if (!(ss_e2
& DESC_P_MASK
))
2444 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2446 // push_size = ((param_count * 2) + 8) << shift;
2448 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2449 old_ssp
= env
->segs
[R_SS
].base
;
2451 sp_mask
= get_sp_mask(ss_e2
);
2452 ssp
= get_seg_base(ss_e1
, ss_e2
);
2454 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2455 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2456 for(i
= param_count
- 1; i
>= 0; i
--) {
2457 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2458 PUSHL(ssp
, sp
, sp_mask
, val
);
2461 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2462 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2463 for(i
= param_count
- 1; i
>= 0; i
--) {
2464 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2465 PUSHW(ssp
, sp
, sp_mask
, val
);
2470 /* to same privilege */
2472 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2473 ssp
= env
->segs
[R_SS
].base
;
2474 // push_size = (4 << shift);
2479 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2480 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2482 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2483 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2486 /* from this point, not restartable */
2489 ss
= (ss
& ~3) | dpl
;
2490 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2492 get_seg_limit(ss_e1
, ss_e2
),
2496 selector
= (selector
& ~3) | dpl
;
2497 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2498 get_seg_base(e1
, e2
),
2499 get_seg_limit(e1
, e2
),
2501 cpu_x86_set_cpl(env
, dpl
);
2502 SET_ESP(sp
, sp_mask
);
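/* For the inner-privilege call gate path above: the CPU switches to the
   stack that the TSS provides for the target DPL and copies param_count
   entries from the old stack.  Illustrative example (numbers not from the
   original source): a 32-bit call gate with param_count = 2 ends up pushing
   the old SS, the old ESP, the two copied parameters, and finally CS and the
   return EIP on the new stack. */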
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
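/* Stack frame popped by helper_iret_real, for reference (illustrative, not
   part of the original source): with shift == 1 it pops three 32-bit slots
   (EIP, CS, EFLAGS, 12 bytes total); with shift == 0 it pops three 16-bit
   slots (6 bytes), matching the frame pushed by a real-mode or vm86
   interrupt. */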
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
2565 /* protected mode iret */
2566 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2568 uint32_t new_cs
, new_eflags
, new_ss
;
2569 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2570 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2571 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2572 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2574 #ifdef TARGET_X86_64
2579 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2581 ssp
= env
->segs
[R_SS
].base
;
2582 new_eflags
= 0; /* avoid warning */
2583 #ifdef TARGET_X86_64
2589 POPQ(sp
, new_eflags
);
2595 POPL(ssp
, sp
, sp_mask
, new_eip
);
2596 POPL(ssp
, sp
, sp_mask
, new_cs
);
2599 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2600 if (new_eflags
& VM_MASK
)
2601 goto return_to_vm86
;
2605 POPW(ssp
, sp
, sp_mask
, new_eip
);
2606 POPW(ssp
, sp
, sp_mask
, new_cs
);
2608 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2610 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2611 new_cs
, new_eip
, shift
, addend
);
2612 LOG_PCALL_STATE(env
);
2613 if ((new_cs
& 0xfffc) == 0)
2614 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2615 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2616 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2617 if (!(e2
& DESC_S_MASK
) ||
2618 !(e2
& DESC_CS_MASK
))
2619 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2620 cpl
= env
->hflags
& HF_CPL_MASK
;
2623 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2624 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2625 if (e2
& DESC_C_MASK
) {
2627 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2630 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2632 if (!(e2
& DESC_P_MASK
))
2633 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2636 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2637 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2638 /* return to same privilege level */
2639 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2640 get_seg_base(e1
, e2
),
2641 get_seg_limit(e1
, e2
),
2644 /* return to different privilege level */
2645 #ifdef TARGET_X86_64
2654 POPL(ssp
, sp
, sp_mask
, new_esp
);
2655 POPL(ssp
, sp
, sp_mask
, new_ss
);
2659 POPW(ssp
, sp
, sp_mask
, new_esp
);
2660 POPW(ssp
, sp
, sp_mask
, new_ss
);
2662 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2664 if ((new_ss
& 0xfffc) == 0) {
2665 #ifdef TARGET_X86_64
2666 /* NULL ss is allowed in long mode if cpl != 3*/
2667 /* XXX: test CS64 ? */
2668 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2669 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2671 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2672 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2673 DESC_W_MASK
| DESC_A_MASK
);
2674 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2678 raise_exception_err(EXCP0D_GPF
, 0);
2681 if ((new_ss
& 3) != rpl
)
2682 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2683 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2684 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2685 if (!(ss_e2
& DESC_S_MASK
) ||
2686 (ss_e2
& DESC_CS_MASK
) ||
2687 !(ss_e2
& DESC_W_MASK
))
2688 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2689 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2691 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2692 if (!(ss_e2
& DESC_P_MASK
))
2693 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2694 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2695 get_seg_base(ss_e1
, ss_e2
),
2696 get_seg_limit(ss_e1
, ss_e2
),
2700 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2701 get_seg_base(e1
, e2
),
2702 get_seg_limit(e1
, e2
),
2704 cpu_x86_set_cpl(env
, rpl
);
2706 #ifdef TARGET_X86_64
2707 if (env
->hflags
& HF_CS64_MASK
)
2711 sp_mask
= get_sp_mask(ss_e2
);
2713 /* validate data segments */
2714 validate_seg(R_ES
, rpl
);
2715 validate_seg(R_DS
, rpl
);
2716 validate_seg(R_FS
, rpl
);
2717 validate_seg(R_GS
, rpl
);
2721 SET_ESP(sp
, sp_mask
);
2724 /* NOTE: 'cpl' is the _old_ CPL */
2725 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2727 eflags_mask
|= IOPL_MASK
;
2728 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2730 eflags_mask
|= IF_MASK
;
2732 eflags_mask
&= 0xffff;
2733 load_eflags(new_eflags
, eflags_mask
);
2738 POPL(ssp
, sp
, sp_mask
, new_esp
);
2739 POPL(ssp
, sp
, sp_mask
, new_ss
);
2740 POPL(ssp
, sp
, sp_mask
, new_es
);
2741 POPL(ssp
, sp
, sp_mask
, new_ds
);
2742 POPL(ssp
, sp
, sp_mask
, new_fs
);
2743 POPL(ssp
, sp
, sp_mask
, new_gs
);
2745 /* modify processor state */
2746 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2747 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2748 load_seg_vm(R_CS
, new_cs
& 0xffff);
2749 cpu_x86_set_cpl(env
, 3);
2750 load_seg_vm(R_SS
, new_ss
& 0xffff);
2751 load_seg_vm(R_ES
, new_es
& 0xffff);
2752 load_seg_vm(R_DS
, new_ds
& 0xffff);
2753 load_seg_vm(R_FS
, new_fs
& 0xffff);
2754 load_seg_vm(R_GS
, new_gs
& 0xffff);
2756 env
->eip
= new_eip
& 0xffff;
2760 void helper_iret_protected(int shift
, int next_eip
)
2762 int tss_selector
, type
;
2765 /* specific case for TSS */
2766 if (env
->eflags
& NT_MASK
) {
2767 #ifdef TARGET_X86_64
2768 if (env
->hflags
& HF_LMA_MASK
)
2769 raise_exception_err(EXCP0D_GPF
, 0);
2771 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2772 if (tss_selector
& 4)
2773 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2774 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2775 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2776 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2777 /* NOTE: we check both segment and busy TSS */
2779 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2780 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2782 helper_ret_protected(shift
, 1, 0);
2784 env
->hflags2
&= ~HF2_NMI_MASK
;
2787 void helper_lret_protected(int shift
, int addend
)
2789 helper_ret_protected(shift
, 0, addend
);
2792 void helper_sysenter(void)
2794 if (env
->sysenter_cs
== 0) {
2795 raise_exception_err(EXCP0D_GPF
, 0);
2797 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2798 cpu_x86_set_cpl(env
, 0);
2800 #ifdef TARGET_X86_64
2801 if (env
->hflags
& HF_LMA_MASK
) {
2802 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2804 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2806 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2810 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2812 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2814 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2816 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2818 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2820 DESC_W_MASK
| DESC_A_MASK
);
2821 ESP
= env
->sysenter_esp
;
2822 EIP
= env
->sysenter_eip
;
2825 void helper_sysexit(int dflag
)
2829 cpl
= env
->hflags
& HF_CPL_MASK
;
2830 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2831 raise_exception_err(EXCP0D_GPF
, 0);
2833 cpu_x86_set_cpl(env
, 3);
2834 #ifdef TARGET_X86_64
2836 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) | 3,
2838 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2839 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2840 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2841 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) | 3,
2843 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2844 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2845 DESC_W_MASK
| DESC_A_MASK
);
2849 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2851 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2852 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2853 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2854 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2856 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2857 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2858 DESC_W_MASK
| DESC_A_MASK
);
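/* SYSENTER/SYSEXIT selector layout used above, for reference: the flat
   segments are derived from MSR_IA32_SYSENTER_CS, i.e. kernel CS = cs and
   kernel SS = cs + 8 on SYSENTER, and on SYSEXIT user CS = cs + 16 and
   user SS = cs + 24 (cs + 32 / cs + 40 for the 64-bit variants), always with
   the RPL forced to the target privilege level. */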
2864 #if defined(CONFIG_USER_ONLY)
2865 target_ulong
helper_read_crN(int reg
)
2870 void helper_write_crN(int reg
, target_ulong t0
)
2874 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2878 target_ulong
helper_read_crN(int reg
)
2882 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2888 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2889 val
= cpu_get_apic_tpr(env
->apic_state
);
2898 void helper_write_crN(int reg
, target_ulong t0
)
2900 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2903 cpu_x86_update_cr0(env
, t0
);
2906 cpu_x86_update_cr3(env
, t0
);
2909 cpu_x86_update_cr4(env
, t0
);
2912 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2913 cpu_set_apic_tpr(env
->apic_state
, t0
);
2915 env
->v_tpr
= t0
& 0x0f;
2923 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2928 hw_breakpoint_remove(env
, reg
);
2930 hw_breakpoint_insert(env
, reg
);
2931 } else if (reg
== 7) {
2932 for (i
= 0; i
< 4; i
++)
2933 hw_breakpoint_remove(env
, i
);
2935 for (i
= 0; i
< 4; i
++)
2936 hw_breakpoint_insert(env
, i
);
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
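/* Worked example of the LMSW masking above (illustrative numbers, not from
   the original source): with CR0 = 0x11 (PE already set) and an LMSW operand
   of 0x0, the new value is (0x11 & ~0xe) | (0x0 & 0xf) = 0x11, so PE stays
   set; only MP, EM and TS (bits 1-3) can be cleared this way. */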
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
2993 #if defined(CONFIG_USER_ONLY)
2994 void helper_wrmsr(void)
2998 void helper_rdmsr(void)
3002 void helper_wrmsr(void)
3006 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3008 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3010 switch((uint32_t)ECX
) {
3011 case MSR_IA32_SYSENTER_CS
:
3012 env
->sysenter_cs
= val
& 0xffff;
3014 case MSR_IA32_SYSENTER_ESP
:
3015 env
->sysenter_esp
= val
;
3017 case MSR_IA32_SYSENTER_EIP
:
3018 env
->sysenter_eip
= val
;
3020 case MSR_IA32_APICBASE
:
3021 cpu_set_apic_base(env
->apic_state
, val
);
3025 uint64_t update_mask
;
3027 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3028 update_mask
|= MSR_EFER_SCE
;
3029 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3030 update_mask
|= MSR_EFER_LME
;
3031 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3032 update_mask
|= MSR_EFER_FFXSR
;
3033 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3034 update_mask
|= MSR_EFER_NXE
;
3035 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3036 update_mask
|= MSR_EFER_SVME
;
3037 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3038 update_mask
|= MSR_EFER_FFXSR
;
3039 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3040 (val
& update_mask
));
3049 case MSR_VM_HSAVE_PA
:
3050 env
->vm_hsave
= val
;
3052 #ifdef TARGET_X86_64
3063 env
->segs
[R_FS
].base
= val
;
3066 env
->segs
[R_GS
].base
= val
;
3068 case MSR_KERNELGSBASE
:
3069 env
->kernelgsbase
= val
;
3072 case MSR_MTRRphysBase(0):
3073 case MSR_MTRRphysBase(1):
3074 case MSR_MTRRphysBase(2):
3075 case MSR_MTRRphysBase(3):
3076 case MSR_MTRRphysBase(4):
3077 case MSR_MTRRphysBase(5):
3078 case MSR_MTRRphysBase(6):
3079 case MSR_MTRRphysBase(7):
3080 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3082 case MSR_MTRRphysMask(0):
3083 case MSR_MTRRphysMask(1):
3084 case MSR_MTRRphysMask(2):
3085 case MSR_MTRRphysMask(3):
3086 case MSR_MTRRphysMask(4):
3087 case MSR_MTRRphysMask(5):
3088 case MSR_MTRRphysMask(6):
3089 case MSR_MTRRphysMask(7):
3090 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
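    /* The variable-range MTRR MSRs come in base/mask pairs, so the index
       arithmetic above maps MSR_MTRRphysBase(n)/MSR_MTRRphysMask(n) onto
       mtrr_var[n]: e.g. ECX = MSR_MTRRphysMask(3) gives
       (ECX - MSR_MTRRphysMask(0)) / 2 = 3 (illustrative, assuming the usual
       consecutive MSR numbering for these registers). */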
3092 case MSR_MTRRfix64K_00000
:
3093 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3095 case MSR_MTRRfix16K_80000
:
3096 case MSR_MTRRfix16K_A0000
:
3097 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3099 case MSR_MTRRfix4K_C0000
:
3100 case MSR_MTRRfix4K_C8000
:
3101 case MSR_MTRRfix4K_D0000
:
3102 case MSR_MTRRfix4K_D8000
:
3103 case MSR_MTRRfix4K_E0000
:
3104 case MSR_MTRRfix4K_E8000
:
3105 case MSR_MTRRfix4K_F0000
:
3106 case MSR_MTRRfix4K_F8000
:
3107 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3109 case MSR_MTRRdefType
:
3110 env
->mtrr_deftype
= val
;
3112 case MSR_MCG_STATUS
:
3113 env
->mcg_status
= val
;
3116 if ((env
->mcg_cap
& MCG_CTL_P
)
3117 && (val
== 0 || val
== ~(uint64_t)0))
3124 if ((uint32_t)ECX
>= MSR_MC0_CTL
3125 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3126 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3127 if ((offset
& 0x3) != 0
3128 || (val
== 0 || val
== ~(uint64_t)0))
3129 env
->mce_banks
[offset
] = val
;
3132 /* XXX: exception ? */
3137 void helper_rdmsr(void)
3141 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3143 switch((uint32_t)ECX
) {
3144 case MSR_IA32_SYSENTER_CS
:
3145 val
= env
->sysenter_cs
;
3147 case MSR_IA32_SYSENTER_ESP
:
3148 val
= env
->sysenter_esp
;
3150 case MSR_IA32_SYSENTER_EIP
:
3151 val
= env
->sysenter_eip
;
3153 case MSR_IA32_APICBASE
:
3154 val
= cpu_get_apic_base(env
->apic_state
);
3165 case MSR_VM_HSAVE_PA
:
3166 val
= env
->vm_hsave
;
3168 case MSR_IA32_PERF_STATUS
:
3169 /* tsc_increment_by_tick */
3171 /* CPU multiplier */
3172 val
|= (((uint64_t)4ULL) << 40);
3174 #ifdef TARGET_X86_64
3185 val
= env
->segs
[R_FS
].base
;
3188 val
= env
->segs
[R_GS
].base
;
3190 case MSR_KERNELGSBASE
:
3191 val
= env
->kernelgsbase
;
3197 case MSR_MTRRphysBase(0):
3198 case MSR_MTRRphysBase(1):
3199 case MSR_MTRRphysBase(2):
3200 case MSR_MTRRphysBase(3):
3201 case MSR_MTRRphysBase(4):
3202 case MSR_MTRRphysBase(5):
3203 case MSR_MTRRphysBase(6):
3204 case MSR_MTRRphysBase(7):
3205 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3207 case MSR_MTRRphysMask(0):
3208 case MSR_MTRRphysMask(1):
3209 case MSR_MTRRphysMask(2):
3210 case MSR_MTRRphysMask(3):
3211 case MSR_MTRRphysMask(4):
3212 case MSR_MTRRphysMask(5):
3213 case MSR_MTRRphysMask(6):
3214 case MSR_MTRRphysMask(7):
3215 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3217 case MSR_MTRRfix64K_00000
:
3218 val
= env
->mtrr_fixed
[0];
3220 case MSR_MTRRfix16K_80000
:
3221 case MSR_MTRRfix16K_A0000
:
3222 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3224 case MSR_MTRRfix4K_C0000
:
3225 case MSR_MTRRfix4K_C8000
:
3226 case MSR_MTRRfix4K_D0000
:
3227 case MSR_MTRRfix4K_D8000
:
3228 case MSR_MTRRfix4K_E0000
:
3229 case MSR_MTRRfix4K_E8000
:
3230 case MSR_MTRRfix4K_F0000
:
3231 case MSR_MTRRfix4K_F8000
:
3232 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3234 case MSR_MTRRdefType
:
3235 val
= env
->mtrr_deftype
;
3238 if (env
->cpuid_features
& CPUID_MTRR
)
3239 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3241 /* XXX: exception ? */
3248 if (env
->mcg_cap
& MCG_CTL_P
)
3253 case MSR_MCG_STATUS
:
3254 val
= env
->mcg_status
;
3257 if ((uint32_t)ECX
>= MSR_MC0_CTL
3258 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3259 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3260 val
= env
->mce_banks
[offset
];
3263 /* XXX: exception ? */
3267 EAX
= (uint32_t)(val
);
3268 EDX
= (uint32_t)(val
>> 32);
3272 target_ulong
helper_lsl(target_ulong selector1
)
3275 uint32_t e1
, e2
, eflags
, selector
;
3276 int rpl
, dpl
, cpl
, type
;
3278 selector
= selector1
& 0xffff;
3279 eflags
= helper_cc_compute_all(CC_OP
);
3280 if ((selector
& 0xfffc) == 0)
3282 if (load_segment(&e1
, &e2
, selector
) != 0)
3285 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3286 cpl
= env
->hflags
& HF_CPL_MASK
;
3287 if (e2
& DESC_S_MASK
) {
3288 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3291 if (dpl
< cpl
|| dpl
< rpl
)
3295 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3306 if (dpl
< cpl
|| dpl
< rpl
) {
3308 CC_SRC
= eflags
& ~CC_Z
;
3312 limit
= get_seg_limit(e1
, e2
);
3313 CC_SRC
= eflags
| CC_Z
;
3317 target_ulong
helper_lar(target_ulong selector1
)
3319 uint32_t e1
, e2
, eflags
, selector
;
3320 int rpl
, dpl
, cpl
, type
;
3322 selector
= selector1
& 0xffff;
3323 eflags
= helper_cc_compute_all(CC_OP
);
3324 if ((selector
& 0xfffc) == 0)
3326 if (load_segment(&e1
, &e2
, selector
) != 0)
3329 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3330 cpl
= env
->hflags
& HF_CPL_MASK
;
3331 if (e2
& DESC_S_MASK
) {
3332 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3335 if (dpl
< cpl
|| dpl
< rpl
)
3339 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3353 if (dpl
< cpl
|| dpl
< rpl
) {
3355 CC_SRC
= eflags
& ~CC_Z
;
3359 CC_SRC
= eflags
| CC_Z
;
3360 return e2
& 0x00f0ff00;
3363 void helper_verr(target_ulong selector1
)
3365 uint32_t e1
, e2
, eflags
, selector
;
3368 selector
= selector1
& 0xffff;
3369 eflags
= helper_cc_compute_all(CC_OP
);
3370 if ((selector
& 0xfffc) == 0)
3372 if (load_segment(&e1
, &e2
, selector
) != 0)
3374 if (!(e2
& DESC_S_MASK
))
3377 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3378 cpl
= env
->hflags
& HF_CPL_MASK
;
3379 if (e2
& DESC_CS_MASK
) {
3380 if (!(e2
& DESC_R_MASK
))
3382 if (!(e2
& DESC_C_MASK
)) {
3383 if (dpl
< cpl
|| dpl
< rpl
)
3387 if (dpl
< cpl
|| dpl
< rpl
) {
3389 CC_SRC
= eflags
& ~CC_Z
;
3393 CC_SRC
= eflags
| CC_Z
;
3396 void helper_verw(target_ulong selector1
)
3398 uint32_t e1
, e2
, eflags
, selector
;
3401 selector
= selector1
& 0xffff;
3402 eflags
= helper_cc_compute_all(CC_OP
);
3403 if ((selector
& 0xfffc) == 0)
3405 if (load_segment(&e1
, &e2
, selector
) != 0)
3407 if (!(e2
& DESC_S_MASK
))
3410 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3411 cpl
= env
->hflags
& HF_CPL_MASK
;
3412 if (e2
& DESC_CS_MASK
) {
3415 if (dpl
< cpl
|| dpl
< rpl
)
3417 if (!(e2
& DESC_W_MASK
)) {
3419 CC_SRC
= eflags
& ~CC_Z
;
3423 CC_SRC
= eflags
| CC_Z
;
/* x87 FPU helpers */

static inline double floatx80_to_double(floatx80 a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}

static inline floatx80 double_to_floatx80(double a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.d = a;
    return float64_to_floatx80(u.f64, &env->fp_status);
}
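/* Note: these conversions round-trip the 80-bit extended value through an
   IEEE double (float64), so the transcendental helpers below that rely on
   them (fyl2x, fptan, fsin, ...) only deliver roughly 53 bits of precision
   rather than the full 64-bit significand of a real x87. */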
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
{
    if (floatx80_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx80_div(a, b, &env->fp_status);
}
3465 static void fpu_raise_exception(void)
3467 if (env
->cr
[0] & CR0_NE_MASK
) {
3468 raise_exception(EXCP10_COPR
);
3470 #if !defined(CONFIG_USER_ONLY)
3477 void helper_flds_FT0(uint32_t val
)
3484 FT0
= float32_to_floatx80(u
.f
, &env
->fp_status
);
3487 void helper_fldl_FT0(uint64_t val
)
3494 FT0
= float64_to_floatx80(u
.f
, &env
->fp_status
);
3497 void helper_fildl_FT0(int32_t val
)
3499 FT0
= int32_to_floatx80(val
, &env
->fp_status
);
3502 void helper_flds_ST0(uint32_t val
)
3509 new_fpstt
= (env
->fpstt
- 1) & 7;
3511 env
->fpregs
[new_fpstt
].d
= float32_to_floatx80(u
.f
, &env
->fp_status
);
3512 env
->fpstt
= new_fpstt
;
3513 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3516 void helper_fldl_ST0(uint64_t val
)
3523 new_fpstt
= (env
->fpstt
- 1) & 7;
3525 env
->fpregs
[new_fpstt
].d
= float64_to_floatx80(u
.f
, &env
->fp_status
);
3526 env
->fpstt
= new_fpstt
;
3527 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3530 void helper_fildl_ST0(int32_t val
)
3533 new_fpstt
= (env
->fpstt
- 1) & 7;
3534 env
->fpregs
[new_fpstt
].d
= int32_to_floatx80(val
, &env
->fp_status
);
3535 env
->fpstt
= new_fpstt
;
3536 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3539 void helper_fildll_ST0(int64_t val
)
3542 new_fpstt
= (env
->fpstt
- 1) & 7;
3543 env
->fpregs
[new_fpstt
].d
= int64_to_floatx80(val
, &env
->fp_status
);
3544 env
->fpstt
= new_fpstt
;
3545 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3548 uint32_t helper_fsts_ST0(void)
3554 u
.f
= floatx80_to_float32(ST0
, &env
->fp_status
);
3558 uint64_t helper_fstl_ST0(void)
3564 u
.f
= floatx80_to_float64(ST0
, &env
->fp_status
);
3568 int32_t helper_fist_ST0(void)
3571 val
= floatx80_to_int32(ST0
, &env
->fp_status
);
3572 if (val
!= (int16_t)val
)
3577 int32_t helper_fistl_ST0(void)
3580 val
= floatx80_to_int32(ST0
, &env
->fp_status
);
3584 int64_t helper_fistll_ST0(void)
3587 val
= floatx80_to_int64(ST0
, &env
->fp_status
);
3591 int32_t helper_fistt_ST0(void)
3594 val
= floatx80_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3595 if (val
!= (int16_t)val
)
3600 int32_t helper_fisttl_ST0(void)
3603 val
= floatx80_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3607 int64_t helper_fisttll_ST0(void)
3610 val
= floatx80_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3614 void helper_fldt_ST0(target_ulong ptr
)
3617 new_fpstt
= (env
->fpstt
- 1) & 7;
3618 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3619 env
->fpstt
= new_fpstt
;
3620 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3623 void helper_fstt_ST0(target_ulong ptr
)
3625 helper_fstt(ST0
, ptr
);
3628 void helper_fpush(void)
3633 void helper_fpop(void)
3638 void helper_fdecstp(void)
3640 env
->fpstt
= (env
->fpstt
- 1) & 7;
3641 env
->fpus
&= (~0x4700);
3644 void helper_fincstp(void)
3646 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3647 env
->fpus
&= (~0x4700);
3652 void helper_ffree_STN(int st_index
)
3654 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3657 void helper_fmov_ST0_FT0(void)
3662 void helper_fmov_FT0_STN(int st_index
)
3667 void helper_fmov_ST0_STN(int st_index
)
3672 void helper_fmov_STN_ST0(int st_index
)
3677 void helper_fxchg_ST0_STN(int st_index
)
3685 /* FPU operations */
3687 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
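/* fcom_ccval is indexed by floatx80_compare()'s result + 1, mapping
   less/equal/greater/unordered onto the x87 condition bits:
   -1 -> 0x0100 (C0), 0 -> 0x4000 (C3), 1 -> 0x0000,
   unordered -> 0x4500 (C3|C2|C0). */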
3689 void helper_fcom_ST0_FT0(void)
3693 ret
= floatx80_compare(ST0
, FT0
, &env
->fp_status
);
3694 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3697 void helper_fucom_ST0_FT0(void)
3701 ret
= floatx80_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3702 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3705 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3707 void helper_fcomi_ST0_FT0(void)
3712 ret
= floatx80_compare(ST0
, FT0
, &env
->fp_status
);
3713 eflags
= helper_cc_compute_all(CC_OP
);
3714 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3718 void helper_fucomi_ST0_FT0(void)
3723 ret
= floatx80_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3724 eflags
= helper_cc_compute_all(CC_OP
);
3725 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
3759 /* fp operations between STN and ST0 */
3761 void helper_fadd_STN_ST0(int st_index
)
3763 ST(st_index
) = floatx80_add(ST(st_index
), ST0
, &env
->fp_status
);
3766 void helper_fmul_STN_ST0(int st_index
)
3768 ST(st_index
) = floatx80_mul(ST(st_index
), ST0
, &env
->fp_status
);
3771 void helper_fsub_STN_ST0(int st_index
)
3773 ST(st_index
) = floatx80_sub(ST(st_index
), ST0
, &env
->fp_status
);
3776 void helper_fsubr_STN_ST0(int st_index
)
3778 ST(st_index
) = floatx80_sub(ST0
, ST(st_index
), &env
->fp_status
);
3781 void helper_fdiv_STN_ST0(int st_index
)
3785 *p
= helper_fdiv(*p
, ST0
);
3788 void helper_fdivr_STN_ST0(int st_index
)
3792 *p
= helper_fdiv(ST0
, *p
);
3795 /* misc FPU operations */
3796 void helper_fchs_ST0(void)
3798 ST0
= floatx80_chs(ST0
);
3801 void helper_fabs_ST0(void)
3803 ST0
= floatx80_abs(ST0
);
3806 void helper_fld1_ST0(void)
3811 void helper_fldl2t_ST0(void)
3816 void helper_fldl2e_ST0(void)
3821 void helper_fldpi_ST0(void)
3826 void helper_fldlg2_ST0(void)
3831 void helper_fldln2_ST0(void)
3836 void helper_fldz_ST0(void)
3838 ST0
= floatx80_zero
;
3841 void helper_fldz_FT0(void)
3843 FT0
= floatx80_zero
;
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
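/* The FPU status word keeps the top-of-stack pointer (TOP) in bits 11-13,
   so FNSTSW above merges the 3-bit fpstt value into that field.
   Illustrative example (not from the original source): with fpstt = 5 and
   all exception flags clear, the returned status word is 5 << 11 = 0x2800. */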
3851 uint32_t helper_fnstcw(void)
3856 static void update_fp_status(void)
3860 /* set rounding mode */
3861 switch(env
->fpuc
& RC_MASK
) {
3864 rnd_type
= float_round_nearest_even
;
3867 rnd_type
= float_round_down
;
3870 rnd_type
= float_round_up
;
3873 rnd_type
= float_round_to_zero
;
3876 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3877 switch((env
->fpuc
>> 8) & 3) {
3889 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3892 void helper_fldcw(uint32_t val
)
3898 void helper_fclex(void)
3900 env
->fpus
&= 0x7f00;
3903 void helper_fwait(void)
3905 if (env
->fpus
& FPUS_SE
)
3906 fpu_raise_exception();
3909 void helper_fninit(void)
3926 void helper_fbld_ST0(target_ulong ptr
)
3934 for(i
= 8; i
>= 0; i
--) {
3936 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3938 tmp
= int64_to_floatx80(val
, &env
->fp_status
);
3939 if (ldub(ptr
+ 9) & 0x80) {
3946 void helper_fbst_ST0(target_ulong ptr
)
3949 target_ulong mem_ref
, mem_end
;
3952 val
= floatx80_to_int64(ST0
, &env
->fp_status
);
3954 mem_end
= mem_ref
+ 9;
3961 while (mem_ref
< mem_end
) {
3966 v
= ((v
/ 10) << 4) | (v
% 10);
3969 while (mem_ref
< mem_end
) {
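/* FBLD/FBST above use the x87 packed-BCD format: 9 bytes of two decimal
   digits each (least significant byte first) plus a sign byte at offset 9.
   Illustrative example (not from the original source): the value 1234 is
   stored as 34 12 00 00 00 00 00 00 00 00, and the digit-unpacking step
   val = val * 100 + (v >> 4) * 10 + (v & 0xf) rebuilds it byte by byte. */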
3974 void helper_f2xm1(void)
3976 double val
= floatx80_to_double(ST0
);
3977 val
= pow(2.0, val
) - 1.0;
3978 ST0
= double_to_floatx80(val
);
3981 void helper_fyl2x(void)
3983 double fptemp
= floatx80_to_double(ST0
);
3986 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3987 fptemp
*= floatx80_to_double(ST1
);
3988 ST1
= double_to_floatx80(fptemp
);
3991 env
->fpus
&= (~0x4700);
3996 void helper_fptan(void)
3998 double fptemp
= floatx80_to_double(ST0
);
4000 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4003 fptemp
= tan(fptemp
);
4004 ST0
= double_to_floatx80(fptemp
);
4007 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4008 /* the above code is for |arg| < 2**52 only */
4012 void helper_fpatan(void)
4014 double fptemp
, fpsrcop
;
4016 fpsrcop
= floatx80_to_double(ST1
);
4017 fptemp
= floatx80_to_double(ST0
);
4018 ST1
= double_to_floatx80(atan2(fpsrcop
, fptemp
));
4022 void helper_fxtract(void)
4028 if (floatx80_is_zero(ST0
)) {
4029 /* Easy way to generate -inf and raising division by 0 exception */
4030 ST0
= floatx80_div(floatx80_chs(floatx80_one
), floatx80_zero
, &env
->fp_status
);
4036 expdif
= EXPD(temp
) - EXPBIAS
;
4037 /*DP exponent bias*/
4038 ST0
= int32_to_floatx80(expdif
, &env
->fp_status
);
4045 void helper_fprem1(void)
4047 double st0
, st1
, dblq
, fpsrcop
, fptemp
;
4048 CPU_LDoubleU fpsrcop1
, fptemp1
;
4050 signed long long int q
;
4052 st0
= floatx80_to_double(ST0
);
4053 st1
= floatx80_to_double(ST1
);
4055 if (isinf(st0
) || isnan(st0
) || isnan(st1
) || (st1
== 0.0)) {
4056 ST0
= double_to_floatx80(0.0 / 0.0); /* NaN */
4057 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4065 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4068 /* optimisation? taken from the AMD docs */
4069 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4070 /* ST0 is unchanged */
4075 dblq
= fpsrcop
/ fptemp
;
4076 /* round dblq towards nearest integer */
4078 st0
= fpsrcop
- fptemp
* dblq
;
4080 /* convert dblq to q by truncating towards zero */
4082 q
= (signed long long int)(-dblq
);
4084 q
= (signed long long int)dblq
;
4086 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4087 /* (C0,C3,C1) <-- (q2,q1,q0) */
4088 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4089 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4090 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4092 env
->fpus
|= 0x400; /* C2 <-- 1 */
4093 fptemp
= pow(2.0, expdif
- 50);
4094 fpsrcop
= (st0
/ st1
) / fptemp
;
4095 /* fpsrcop = integer obtained by chopping */
4096 fpsrcop
= (fpsrcop
< 0.0) ?
4097 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4098 st0
-= (st1
* fpsrcop
* fptemp
);
4100 ST0
= double_to_floatx80(st0
);
4103 void helper_fprem(void)
4105 double st0
, st1
, dblq
, fpsrcop
, fptemp
;
4106 CPU_LDoubleU fpsrcop1
, fptemp1
;
4108 signed long long int q
;
4110 st0
= floatx80_to_double(ST0
);
4111 st1
= floatx80_to_double(ST1
);
4113 if (isinf(st0
) || isnan(st0
) || isnan(st1
) || (st1
== 0.0)) {
4114 ST0
= double_to_floatx80(0.0 / 0.0); /* NaN */
4115 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4123 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4126 /* optimisation? taken from the AMD docs */
4127 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4128 /* ST0 is unchanged */
4132 if ( expdif
< 53 ) {
4133 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4134 /* round dblq towards zero */
4135 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4136 st0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4138 /* convert dblq to q by truncating towards zero */
4140 q
= (signed long long int)(-dblq
);
4142 q
= (signed long long int)dblq
;
4144 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4145 /* (C0,C3,C1) <-- (q2,q1,q0) */
4146 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4147 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4148 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4150 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4151 env
->fpus
|= 0x400; /* C2 <-- 1 */
4152 fptemp
= pow(2.0, (double)(expdif
- N
));
4153 fpsrcop
= (st0
/ st1
) / fptemp
;
4154 /* fpsrcop = integer obtained by chopping */
4155 fpsrcop
= (fpsrcop
< 0.0) ?
4156 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4157 st0
-= (st1
* fpsrcop
* fptemp
);
4159 ST0
= double_to_floatx80(st0
);
4162 void helper_fyl2xp1(void)
4164 double fptemp
= floatx80_to_double(ST0
);
4166 if ((fptemp
+1.0)>0.0) {
4167 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4168 fptemp
*= floatx80_to_double(ST1
);
4169 ST1
= double_to_floatx80(fptemp
);
4172 env
->fpus
&= (~0x4700);
4177 void helper_fsqrt(void)
4179 if (floatx80_is_neg(ST0
)) {
4180 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4183 ST0
= floatx80_sqrt(ST0
, &env
->fp_status
);
4186 void helper_fsincos(void)
4188 double fptemp
= floatx80_to_double(ST0
);
4190 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4193 ST0
= double_to_floatx80(sin(fptemp
));
4195 ST0
= double_to_floatx80(cos(fptemp
));
4196 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4197 /* the above code is for |arg| < 2**63 only */
4201 void helper_frndint(void)
4203 ST0
= floatx80_round_to_int(ST0
, &env
->fp_status
);
4206 void helper_fscale(void)
4208 if (floatx80_is_any_nan(ST1
)) {
4211 int n
= floatx80_to_int32_round_to_zero(ST1
, &env
->fp_status
);
4212 ST0
= floatx80_scalbn(ST0
, n
, &env
->fp_status
);
4216 void helper_fsin(void)
4218 double fptemp
= floatx80_to_double(ST0
);
4220 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4223 ST0
= double_to_floatx80(sin(fptemp
));
4224 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4225 /* the above code is for |arg| < 2**53 only */
4229 void helper_fcos(void)
4231 double fptemp
= floatx80_to_double(ST0
);
4233 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4236 ST0
= double_to_floatx80(cos(fptemp
));
4237 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4238 /* the above code is for |arg5 < 2**63 only */
4242 void helper_fxam_ST0(void)
4249 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4251 env
->fpus
|= 0x200; /* C1 <-- 1 */
4253 /* XXX: test fptags too */
4254 expdif
= EXPD(temp
);
4255 if (expdif
== MAXEXPD
) {
4256 if (MANTD(temp
) == 0x8000000000000000ULL
)
4257 env
->fpus
|= 0x500 /*Infinity*/;
4259 env
->fpus
|= 0x100 /*NaN*/;
4260 } else if (expdif
== 0) {
4261 if (MANTD(temp
) == 0)
4262 env
->fpus
|= 0x4000 /*Zero*/;
4264 env
->fpus
|= 0x4400 /*Denormal*/;
4270 void helper_fstenv(target_ulong ptr
, int data32
)
4272 int fpus
, fptag
, exp
, i
;
4276 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4278 for (i
=7; i
>=0; i
--) {
4280 if (env
->fptags
[i
]) {
4283 tmp
.d
= env
->fpregs
[i
].d
;
4286 if (exp
== 0 && mant
== 0) {
4289 } else if (exp
== 0 || exp
== MAXEXPD
4290 || (mant
& (1LL << 63)) == 0
4292 /* NaNs, infinity, denormal */
4299 stl(ptr
, env
->fpuc
);
4301 stl(ptr
+ 8, fptag
);
4302 stl(ptr
+ 12, 0); /* fpip */
4303 stl(ptr
+ 16, 0); /* fpcs */
4304 stl(ptr
+ 20, 0); /* fpoo */
4305 stl(ptr
+ 24, 0); /* fpos */
4308 stw(ptr
, env
->fpuc
);
4310 stw(ptr
+ 4, fptag
);
4318 void helper_fldenv(target_ulong ptr
, int data32
)
4323 env
->fpuc
= lduw(ptr
);
4324 fpus
= lduw(ptr
+ 4);
4325 fptag
= lduw(ptr
+ 8);
4328 env
->fpuc
= lduw(ptr
);
4329 fpus
= lduw(ptr
+ 2);
4330 fptag
= lduw(ptr
+ 4);
4332 env
->fpstt
= (fpus
>> 11) & 7;
4333 env
->fpus
= fpus
& ~0x3800;
4334 for(i
= 0;i
< 8; i
++) {
4335 env
->fptags
[i
] = ((fptag
& 3) == 3);
4340 void helper_fsave(target_ulong ptr
, int data32
)
4345 helper_fstenv(ptr
, data32
);
4347 ptr
+= (14 << data32
);
4348 for(i
= 0;i
< 8; i
++) {
4350 helper_fstt(tmp
, ptr
);
4368 void helper_frstor(target_ulong ptr
, int data32
)
4373 helper_fldenv(ptr
, data32
);
4374 ptr
+= (14 << data32
);
4376 for(i
= 0;i
< 8; i
++) {
4377 tmp
= helper_fldt(ptr
);
4383 void helper_fxsave(target_ulong ptr
, int data64
)
4385 int fpus
, fptag
, i
, nb_xmm_regs
;
4389 /* The operand must be 16 byte aligned */
4391 raise_exception(EXCP0D_GPF
);
4394 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4396 for(i
= 0; i
< 8; i
++) {
4397 fptag
|= (env
->fptags
[i
] << i
);
4399 stw(ptr
, env
->fpuc
);
4401 stw(ptr
+ 4, fptag
^ 0xff);
4402 #ifdef TARGET_X86_64
4404 stq(ptr
+ 0x08, 0); /* rip */
4405 stq(ptr
+ 0x10, 0); /* rdp */
4409 stl(ptr
+ 0x08, 0); /* eip */
4410 stl(ptr
+ 0x0c, 0); /* sel */
4411 stl(ptr
+ 0x10, 0); /* dp */
4412 stl(ptr
+ 0x14, 0); /* sel */
4416 for(i
= 0;i
< 8; i
++) {
4418 helper_fstt(tmp
, addr
);
4422 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4423 /* XXX: finish it */
4424 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4425 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4426 if (env
->hflags
& HF_CS64_MASK
)
4431 /* Fast FXSAVE leaves out the XMM registers */
4432 if (!(env
->efer
& MSR_EFER_FFXSR
)
4433 || (env
->hflags
& HF_CPL_MASK
)
4434 || !(env
->hflags
& HF_LMA_MASK
)) {
4435 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4436 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4437 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4444 void helper_fxrstor(target_ulong ptr
, int data64
)
4446 int i
, fpus
, fptag
, nb_xmm_regs
;
4450 /* The operand must be 16 byte aligned */
4452 raise_exception(EXCP0D_GPF
);
4455 env
->fpuc
= lduw(ptr
);
4456 fpus
= lduw(ptr
+ 2);
4457 fptag
= lduw(ptr
+ 4);
4458 env
->fpstt
= (fpus
>> 11) & 7;
4459 env
->fpus
= fpus
& ~0x3800;
4461 for(i
= 0;i
< 8; i
++) {
4462 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4466 for(i
= 0;i
< 8; i
++) {
4467 tmp
= helper_fldt(addr
);
4472 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4473 /* XXX: finish it */
4474 env
->mxcsr
= ldl(ptr
+ 0x18);
4476 if (env
->hflags
& HF_CS64_MASK
)
4481 /* Fast FXRESTORE leaves out the XMM registers */
4482 if (!(env
->efer
& MSR_EFER_FFXSR
)
4483 || (env
->hflags
& HF_CPL_MASK
)
4484 || !(env
->hflags
& HF_LMA_MASK
)) {
4485 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4486 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4487 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
4512 #ifdef TARGET_X86_64
//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~*plow;
    *phigh = ~*phigh;
    add128(plow, phigh, 1, 0);
}
4532 /* return TRUE if overflow */
4533 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4535 uint64_t q
, r
, a1
, a0
;
4548 /* XXX: use a better algorithm */
4549 for(i
= 0; i
< 64; i
++) {
4551 a1
= (a1
<< 1) | (a0
>> 63);
4552 if (ab
|| a1
>= b
) {
4558 a0
= (a0
<< 1) | qb
;
4560 #if defined(DEBUG_MULDIV)
4561 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4562 *phigh
, *plow
, b
, a0
, a1
);
4570 /* return TRUE if overflow */
4571 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4574 sa
= ((int64_t)*phigh
< 0);
4576 neg128(plow
, phigh
);
4580 if (div64(plow
, phigh
, b
) != 0)
4583 if (*plow
> (1ULL << 63))
4587 if (*plow
>= (1ULL << 63))
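/* Sketch of what div64() above computes, with illustrative numbers that are
   not from the original source: it divides the 128-bit value phigh:plow by b
   using a 64-step shift-and-subtract loop, leaving the quotient in *plow and
   the remainder in *phigh, and returning non-zero on overflow (the
   "return TRUE if overflow" contract above).  For example phigh = 1,
   plow = 0 (i.e. 2^64) divided by b = 3 yields *plow = 0x5555555555555555
   and *phigh = 1. */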
4595 void helper_mulq_EAX_T0(target_ulong t0
)
4599 mulu64(&r0
, &r1
, EAX
, t0
);
4606 void helper_imulq_EAX_T0(target_ulong t0
)
4610 muls64(&r0
, &r1
, EAX
, t0
);
4614 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4617 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4621 muls64(&r0
, &r1
, t0
, t1
);
4623 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4627 void helper_divq_EAX(target_ulong t0
)
4631 raise_exception(EXCP00_DIVZ
);
4635 if (div64(&r0
, &r1
, t0
))
4636 raise_exception(EXCP00_DIVZ
);
4641 void helper_idivq_EAX(target_ulong t0
)
4645 raise_exception(EXCP00_DIVZ
);
4649 if (idiv64(&r0
, &r1
, t0
))
4650 raise_exception(EXCP00_DIVZ
);
4656 static void do_hlt(void)
4658 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4660 env
->exception_index
= EXCP_HLT
;
4664 void helper_hlt(int next_eip_addend
)
4666 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4667 EIP
+= next_eip_addend
;
4672 void helper_monitor(target_ulong ptr
)
4674 if ((uint32_t)ECX
!= 0)
4675 raise_exception(EXCP0D_GPF
);
4676 /* XXX: store address ? */
4677 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4680 void helper_mwait(int next_eip_addend
)
4682 if ((uint32_t)ECX
!= 0)
4683 raise_exception(EXCP0D_GPF
);
4684 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4685 EIP
+= next_eip_addend
;
4687 /* XXX: not complete but not completely erroneous */
4688 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4689 /* more than one CPU: do not sleep because another CPU may
4696 void helper_debug(void)
4698 env
->exception_index
= EXCP_DEBUG
;
4702 void helper_reset_rf(void)
4704 env
->eflags
&= ~RF_MASK
;
4707 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4709 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4712 void helper_raise_exception(int exception_index
)
4714 raise_exception(exception_index
);
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
void helper_boundw(target_ulong a0, int v)
{
    int low, high;

    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;

    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}
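/* BOUND semantics, for reference (illustrative values, not from the original
   source): the operand a0 points at a signed [low, high] pair; e.g. for
   helper_boundl with low = 0 and high = 9, an index v = 10 is out of range
   and raises #BR (EXCP05_BOUND), while v = 0..9 returns normally. */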
4774 #if !defined(CONFIG_USER_ONLY)
4776 #define MMUSUFFIX _mmu
4779 #include "softmmu_template.h"
4782 #include "softmmu_template.h"
4785 #include "softmmu_template.h"
4788 #include "softmmu_template.h"
4792 #if !defined(CONFIG_USER_ONLY)
4793 /* try to fill the TLB and return an exception if error. If retaddr is
4794 NULL, it means that the function was called in C code (i.e. not
4795 from generated code or from helper.c) */
4796 /* XXX: fix it to restore all registers */
4797 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4799 TranslationBlock
*tb
;
4802 CPUX86State
*saved_env
;
4804 /* XXX: hack to restore env in all cases, even if not called from
4807 env
= cpu_single_env
;
4809 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4812 /* now we have a real cpu fault */
4813 pc
= (unsigned long)retaddr
;
4814 tb
= tb_find_pc(pc
);
4816 /* the PC is inside the translated code. It means that we have
4817 a virtual CPU fault */
4818 cpu_restore_state(tb
, env
, pc
);
4821 raise_exception_err(env
->exception_index
, env
->error_code
);
4827 /* Secure Virtual Machine helpers */
4829 #if defined(CONFIG_USER_ONLY)
4831 void helper_vmrun(int aflag
, int next_eip_addend
)
4834 void helper_vmmcall(void)
4837 void helper_vmload(int aflag
)
4840 void helper_vmsave(int aflag
)
4843 void helper_stgi(void)
4846 void helper_clgi(void)
4849 void helper_skinit(void)
4852 void helper_invlpga(int aflag
)
4855 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4858 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4862 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4863 uint32_t next_eip_addend
)
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
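/* The 12-bit VMCB attrib field packs descriptor bits 8-15 of the cached
   flags into attrib[7:0] and bits 20-23 into attrib[11:8], which is what the
   shifts in svm_save_seg()/svm_load_seg() implement.  Illustrative example
   (values not from the original source): a flat 32-bit code segment with
   flags = 0x00c09a00 saves as attrib = 0x0c9a, and loading 0x0c9a restores
   the same flags. */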
4901 void helper_vmrun(int aflag
, int next_eip_addend
)
4907 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4912 addr
= (uint32_t)EAX
;
4914 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4916 env
->vm_vmcb
= addr
;
4918 /* save the current CPU state in the hsave page */
4919 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4920 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4922 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4923 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4925 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4926 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4927 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4928 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4929 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4930 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4932 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4933 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4935 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4937 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4939 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4941 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4944 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4945 EIP
+ next_eip_addend
);
4946 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4947 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4949 /* load the interception bitmaps so we do not need to access the
4951 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4952 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4953 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4954 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4955 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4956 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4958 /* enable intercepts */
4959 env
->hflags
|= HF_SVMI_MASK
;
4961 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4963 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4964 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4966 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4967 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4969 /* clear exit_info_2 so we behave like the real hardware */
4970 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4972 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4973 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit();
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

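/* VMLOAD/VMSAVE below transfer the state that VMRUN/#VMEXIT do not switch on
   their own (FS, GS, TR, LDTR plus the SYSCALL/SYSENTER MSRs) between the CPU
   and the VMCB whose physical address is taken from rAX. */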
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

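/* STGI/CLGI toggle the global interrupt flag, modelled here as HF2_GIF_MASK
   in hflags2; interrupt delivery is only allowed while GIF is set. */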
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

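/* Generic intercept check: while running under SVM (HF_SVMI_MASK set), the
   operation is matched against the intercept bits cached from the VMCB and a
   #VMEXIT is raised on a hit.  MSR accesses additionally consult the MSR
   permission map: two bits per MSR (read and write), in three 2 KByte areas
   covering MSRs 0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff. */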
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

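/* IN/OUT intercept check: the I/O permission map at iopm_base_pa holds one
   bit per port; `mask` widens the test to cover every port touched by the
   access size encoded in param before deciding to raise SVM_EXIT_IOIO. */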
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

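/* #VMEXIT: store the current guest state and the exit code/info back into the
   guest VMCB, reload the host state that VMRUN stashed in vm_hsave, clear GIF
   and resume execution in the host at the saved host rIP. */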
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

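/* ops_sse.h is included twice: SHIFT 0 builds the 64-bit MMX variants and
   SHIFT 1 the 128-bit SSE variants.  helper_template.h is likewise expanded
   once per operand width (byte/word/long, plus quad on 64-bit targets). */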
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
/* helper_bsf() returns the index of the lowest set bit and helper_bsr() (via
   helper_lzcnt() with wordsize 0) the index of the highest set bit.  With a
   non-zero wordsize, helper_lzcnt() implements LZCNT: it returns wordsize for
   a zero operand, otherwise the number of leading zero bits within that
   operand width. */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

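/* Lazy condition codes: the translator only records the last flag-setting
   operation in CC_OP and its operands in CC_SRC/CC_DST; the dispatchers below
   recompute either the full EFLAGS set or just the carry flag on demand,
   per operation type and operand width. */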
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();