/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>

#include "timing.h"
#include "trace.h"
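
/*
 * Helpers used by the load/store emulation below: if the guest currently has
 * the FP/VSX/Altivec facility disabled in its MSR, queue the matching
 * facility-unavailable interrupt and return true so the caller can bail out
 * without emulating the access.
 */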
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */
/*
 * XXX to do: vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
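
	/*
	 * Decode the primary opcode (and, for opcode 31, the extended opcode)
	 * and emulate the access. The kvmppc_handle_load/store helpers either
	 * complete the access against an in-kernel device or set up an MMIO
	 * exit so userspace can complete it.
	 */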
	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LWZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STDX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STDUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STWUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHAUX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		case OP_31_XOP_LDBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
			break;

		case OP_31_XOP_STDBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 0);
			break;

		case OP_31_XOP_LDX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;

		case OP_31_XOP_LDUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LWAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LWAUX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

#ifdef CONFIG_PPC_FPU
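		/*
		 * X-form floating-point loads and stores. mmio_sp64_extend
		 * tells the MMIO completion code that the 4-byte value is a
		 * single-precision float that must be converted to/from the
		 * 64-bit register format of the FPR.
		 */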
		case OP_31_XOP_LFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 8, 1);
			break;

		case OP_31_XOP_LFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFIWAX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFIWZX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_STFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFIWX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			break;
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
		case OP_31_XOP_LXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXSIWAX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 1);
			break;

		case OP_31_XOP_LXSIWZX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVD2X:
			/*
			 * The full load/store sequence is:
			 *
			 * Step 1: exit from the VM via the page fault ISR;
			 * KVM saves the VSRs (see
			 * guest_exit_cont->store_fp_state->SAVE_32VSRS).
			 *
			 * Step 2: copy the data between memory and the vcpu.
			 * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X we use
			 * 2 copies of 8 bytes or 4 copies of 4 bytes to
			 * emulate one 16-byte copy.
			 * There is also an endianness issue here; see the
			 * LXVD2X_ROT/STXVD2X_ROT macros. If the host is
			 * little-endian, KVM issues an XXSWAPD for
			 * LXVD2X_ROT/STXVD2X_ROT, so the position of the
			 * doublewords in memory must be swapped accordingly.
			 *
			 * Step 3: return to the guest; KVM reloads the
			 * registers (see
			 * kvmppc_hv_entry->load_fp_state->REST_32VSRS).
			 */
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVDSX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type =
					KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_STXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 8, 1);
			break;

		case OP_31_XOP_STXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;

		case OP_31_XOP_STXSIWX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_offset = 1;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;

		case OP_31_XOP_STXVD2X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 8, 1);
			break;

		case OP_31_XOP_STXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
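		/*
		 * Altivec loads and stores (lvx/stvx) operate on a naturally
		 * aligned quadword: the low four address bits are cleared and
		 * the 128-bit access is emulated as two 64-bit MMIO
		 * operations.
		 */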
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.vaddr_accessed &= ~0xFULL;
			vcpu->arch.paddr_accessed &= ~0xFULL;
			vcpu->arch.mmio_vmx_copy_nums = 2;
			emulated = kvmppc_handle_load128_by2x64(run, vcpu,
					KVM_MMIO_REG_VMX|rt, 1);
			break;

			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.vaddr_accessed &= ~0xFULL;
			vcpu->arch.paddr_accessed &= ~0xFULL;
			vcpu->arch.mmio_vmx_copy_nums = 2;
			emulated = kvmppc_handle_store128_by2x64(run, vcpu,
					rs, 1);
			break;
#endif /* CONFIG_ALTIVEC */

		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

#ifdef CONFIG_PPC_FPU
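	/*
	 * D-form floating-point stores: single-precision forms set
	 * mmio_sp64_extend so the 64-bit FPR value is converted to a 32-bit
	 * single before it is stored; the "u" (update) forms also write the
	 * effective address back to ra.
	 */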
	case OP_STFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
		break;

	case OP_STFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 8, 1);
		break;

	case OP_STFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif /* CONFIG_PPC_FPU */

	case OP_LD:
		rt = get_rt(inst);
		switch (inst & 3) {
		case 0:	/* ld */
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;
		case 1: /* ldu */
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;
		case 2:	/* lwa */
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	case OP_STD:
		rs = get_rs(inst);
		switch (inst & 3) {
		case 0:	/* std */
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			break;
		case 1: /* stdu */
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

#ifdef CONFIG_PPC_FPU
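	/*
	 * D-form floating-point loads: single-precision forms set
	 * mmio_sp64_extend so the 32-bit value read from memory is expanded
	 * to the 64-bit FPR format; the "u" (update) forms also write the
	 * effective address back to ra.
	 */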
	case OP_LFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
		break;

	case OP_LFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
		break;

	case OP_LFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif /* CONFIG_PPC_FPU */

	default:
		emulated = EMULATE_FAIL;
		break;
	}
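
	/* Emulation failed: let the guest handle it via a program interrupt. */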
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}