// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
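
/*
 * Facility-availability checks used below: if the guest has the relevant
 * MSR bit (FP/VSX/VEC) clear, queue the matching "unavailable" interrupt
 * for the guest and return true so the caller can bail out and let the
 * guest enable the facility before retrying the instruction.
 */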
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */
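
/*
 * kvmppc_emulate_loadstore() is the entry point for emulating a guest
 * load or store that faulted on an emulated (MMIO) mapping.  It decodes
 * the instruction with the generic powerpc instruction decoder and turns
 * the access into a KVM MMIO exit that userspace completes.
 */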
/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;
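
	/*
	 * Clear any per-access MMIO state left over from a previous
	 * emulation; these mmio_* fields are consumed later by the MMIO
	 * completion path once userspace has serviced the exit.
	 */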
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;
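
	/*
	 * Sync the guest MSR into the register image so the decoder sees the
	 * correct machine state, then let analyse_instr() (the common powerpc
	 * instruction decoder, also used by the alignment and single-step
	 * code) classify the instruction.  A return value of 0 means op now
	 * describes a load/store for us to carry out.
	 */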
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);
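
		/*
		 * Dispatch on the instruction class reported by
		 * analyse_instr().  Plain GPR accesses go straight to the
		 * generic MMIO handlers; the FP/VMX/VSX variants also record
		 * how the data must be copied to or from the register file
		 * when the MMIO completes.
		 */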
		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
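
		/*
		 * For single-precision FP forms (lfs and friends) FPCONV is
		 * set; mmio_sp64_extend tells the completion path that the
		 * 32-bit value read from MMIO must be converted to double
		 * precision before it lands in the FPR.
		 */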
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
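
		/*
		 * For AltiVec loads the element size picks the copy type and
		 * mmio_vmx_offset is the element index within the 16-byte
		 * VMX register; lvx is split into two 8-byte MMIO loads.
		 */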
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
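
		/*
		 * The decoder sets VSX_CHECK_VEC for forms whose operand is in
		 * the upper half of the VSX register file (the VMX registers),
		 * where availability is governed by MSR_VEC rather than
		 * MSR_VSX.
		 */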
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* if need byte reverse, op.val has been reversed by
			 * analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
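
		/*
		 * As with the FP store above, any live AltiVec state must be
		 * flushed back to vcpu->arch (giveup_ext with MSR_VEC) before
		 * the source register is read for the MMIO store.
		 */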
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}
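
	/*
	 * If the access could not be emulated, inject a program interrupt
	 * into the guest and do not advance the PC, so the guest sees the
	 * faulting instruction.
	 */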
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}