// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"
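
/*
 * Helpers that check whether the guest currently has the relevant facility
 * (FP, VSX, Altivec) enabled in its MSR.  If not, the matching "unavailable"
 * interrupt is queued for the guest and emulation stops here, so the guest
 * can enable the facility and retry the instruction.
 */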
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;
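
	/*
	 * Reset the MMIO emulation state carried in vcpu->arch from any
	 * previous in-flight access before analysing the new instruction.
	 */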
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;
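
	/*
	 * Let the generic PowerPC instruction analyser (analyse_instr() from
	 * asm/sstep.h) decode the instruction into an instruction_op that
	 * describes the access type, size, registers and effective address;
	 * the switch below only routes the access to the matching MMIO
	 * handler.
	 */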
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
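
		/*
		 * The FP, Altivec (VMX) and VSX loads follow the same pattern
		 * as the GPR case above, but record extra conversion/copy
		 * information in vcpu->arch so the MMIO completion code in
		 * powerpc.c can place the returned data in the right register
		 * format once the host has satisfied the access.
		 */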
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
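
		/*
		 * Altivec loads: a 16-byte lvx is emulated as two 8-byte MMIO
		 * reads (mmio_vmx_copy_nums = 2), while the element loads
		 * (lvewx/lvehx/lvebx) issue a single access and
		 * mmio_vmx_offset records which element of the vector
		 * register is targeted.
		 */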
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
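
		/*
		 * VSX loads: mmio_copy_type selects doubleword or word copies
		 * (with _DUMP variants for the splat forms), while
		 * mmio_vsx_copy_nums counts how many MMIO reads are needed
		 * and io_size_each is the size of each one.
		 */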
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* if need byte reverse, op.val has been reversed by
			 * analyse_instr().
			 */
			emulated = kvmppc_handle_store(vcpu, op.val, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
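
		/*
		 * Vector and VSX stores mirror the load cases, but must first
		 * ask the host to flush its live register state back into
		 * vcpu->arch (via the giveup_ext() hook) so the value written
		 * out is the guest's current one.
		 */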
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);

			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(vcpu,
						op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precise conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}
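
	/*
	 * If no case above could emulate the access, report it to the guest
	 * as a program interrupt and leave the PC untouched so the faulting
	 * instruction is seen again; on success the PC is stepped past the
	 * emulated instruction below.
	 */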
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}