// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description: This file is derived from arch/powerpc/kvm/44x_emulate.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */
12 #include <asm/kvm_ppc.h>
13 #include <asm/disassemble.h>
14 #include <asm/dbell.h>
15 #include <asm/reg_booke.h>
20 #define XOP_DCBTLS 166
21 #define XOP_MSGSND 206
22 #define XOP_MSGCLR 238
24 #define XOP_TLBIVAX 786
29 #define XOP_EHPRIV 270
31 #ifdef CONFIG_KVM_E500MC
32 static int dbell2prio(ulong param
)
34 int msg
= param
& PPC_DBELL_TYPE_MASK
;
38 case PPC_DBELL_TYPE(PPC_DBELL
):
39 prio
= BOOKE_IRQPRIO_DBELL
;
41 case PPC_DBELL_TYPE(PPC_DBELL_CRIT
):
42 prio
= BOOKE_IRQPRIO_DBELL_CRIT
;
51 static int kvmppc_e500_emul_msgclr(struct kvm_vcpu
*vcpu
, int rb
)
53 ulong param
= vcpu
->arch
.regs
.gpr
[rb
];
54 int prio
= dbell2prio(param
);
59 clear_bit(prio
, &vcpu
->arch
.pending_exceptions
);
63 static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu
*vcpu
, int rb
)
65 ulong param
= vcpu
->arch
.regs
.gpr
[rb
];
66 int prio
= dbell2prio(rb
);
67 int pir
= param
& PPC_DBELL_PIR_MASK
;
69 struct kvm_vcpu
*cvcpu
;
74 kvm_for_each_vcpu(i
, cvcpu
, vcpu
->kvm
) {
75 int cpir
= cvcpu
->arch
.shared
->pir
;
76 if ((param
& PPC_DBELL_MSG_BRDCAST
) || (cpir
== pir
)) {
77 set_bit(prio
, &cvcpu
->arch
.pending_exceptions
);
86 static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu
*vcpu
,
87 unsigned int inst
, int *advance
)
89 int emulated
= EMULATE_DONE
;
91 switch (get_oc(inst
)) {
93 vcpu
->run
->exit_reason
= KVM_EXIT_DEBUG
;
94 vcpu
->run
->debug
.arch
.address
= vcpu
->arch
.regs
.nip
;
95 vcpu
->run
->debug
.arch
.status
= 0;
96 kvmppc_account_exit(vcpu
, DEBUG_EXITS
);
97 emulated
= EMULATE_EXIT_USER
;
101 emulated
= EMULATE_FAIL
;
106 static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu
*vcpu
)
108 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
110 /* Always fail to lock the cache */
111 vcpu_e500
->l1csr0
|= L1CSR0_CUL
;
115 static int kvmppc_e500_emul_mftmr(struct kvm_vcpu
*vcpu
, unsigned int inst
,
118 /* Expose one thread per vcpu */
119 if (get_tmrn(inst
) == TMRN_TMCFG0
) {
120 kvmppc_set_gpr(vcpu
, rt
,
121 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT
));
128 int kvmppc_core_emulate_op_e500(struct kvm_vcpu
*vcpu
,
129 unsigned int inst
, int *advance
)
131 int emulated
= EMULATE_DONE
;
132 int ra
= get_ra(inst
);
133 int rb
= get_rb(inst
);
134 int rt
= get_rt(inst
);
137 switch (get_op(inst
)) {
139 switch (get_xop(inst
)) {
142 emulated
= kvmppc_e500_emul_dcbtls(vcpu
);
145 #ifdef CONFIG_KVM_E500MC
147 emulated
= kvmppc_e500_emul_msgsnd(vcpu
, rb
);
151 emulated
= kvmppc_e500_emul_msgclr(vcpu
, rb
);
156 emulated
= kvmppc_e500_emul_tlbre(vcpu
);
160 emulated
= kvmppc_e500_emul_tlbwe(vcpu
);
164 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
165 emulated
= kvmppc_e500_emul_tlbsx(vcpu
, ea
);
170 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
171 emulated
= kvmppc_e500_emul_tlbilx(vcpu
, type
, ea
);
176 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
177 emulated
= kvmppc_e500_emul_tlbivax(vcpu
, ea
);
181 emulated
= kvmppc_e500_emul_mftmr(vcpu
, inst
, rt
);
185 emulated
= kvmppc_e500_emul_ehpriv(vcpu
, inst
, advance
);
189 emulated
= EMULATE_FAIL
;
195 emulated
= EMULATE_FAIL
;
198 if (emulated
== EMULATE_FAIL
)
199 emulated
= kvmppc_booke_emulate_op(vcpu
, inst
, advance
);
204 int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu
*vcpu
, int sprn
, ulong spr_val
)
206 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
207 int emulated
= EMULATE_DONE
;
210 #ifndef CONFIG_KVM_BOOKE_HV
212 kvmppc_set_pid(vcpu
, spr_val
);
217 vcpu_e500
->pid
[1] = spr_val
;
222 vcpu_e500
->pid
[2] = spr_val
;
225 vcpu
->arch
.shared
->mas0
= spr_val
;
228 vcpu
->arch
.shared
->mas1
= spr_val
;
231 vcpu
->arch
.shared
->mas2
= spr_val
;
234 vcpu
->arch
.shared
->mas7_3
&= ~(u64
)0xffffffff;
235 vcpu
->arch
.shared
->mas7_3
|= spr_val
;
238 vcpu
->arch
.shared
->mas4
= spr_val
;
241 vcpu
->arch
.shared
->mas6
= spr_val
;
244 vcpu
->arch
.shared
->mas7_3
&= (u64
)0xffffffff;
245 vcpu
->arch
.shared
->mas7_3
|= (u64
)spr_val
<< 32;
249 vcpu_e500
->l1csr0
= spr_val
;
250 vcpu_e500
->l1csr0
&= ~(L1CSR0_DCFI
| L1CSR0_CLFC
);
253 vcpu_e500
->l1csr1
= spr_val
;
254 vcpu_e500
->l1csr1
&= ~(L1CSR1_ICFI
| L1CSR1_ICLFR
);
257 vcpu_e500
->hid0
= spr_val
;
260 vcpu_e500
->hid1
= spr_val
;
264 emulated
= kvmppc_e500_emul_mt_mmucsr0(vcpu_e500
,
270 * Guest relies on host power management configurations
271 * Treat the request as a general store
273 vcpu
->arch
.pwrmgtcr0
= spr_val
;
278 * If we are here, it means that we have already flushed the
279 * branch predictor, so just return to guest.
283 /* extra exceptions */
284 #ifdef CONFIG_SPE_POSSIBLE
286 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_UNAVAIL
] = spr_val
;
289 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_DATA
] = spr_val
;
292 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_ROUND
] = spr_val
;
295 #ifdef CONFIG_ALTIVEC
297 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL
] = spr_val
;
300 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_ASSIST
] = spr_val
;
304 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_PERFORMANCE_MONITOR
] = spr_val
;
306 #ifdef CONFIG_KVM_BOOKE_HV
308 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL
] = spr_val
;
311 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL_CRIT
] = spr_val
;
315 emulated
= kvmppc_booke_emulate_mtspr(vcpu
, sprn
, spr_val
);
321 int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu
*vcpu
, int sprn
, ulong
*spr_val
)
323 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
324 int emulated
= EMULATE_DONE
;
327 #ifndef CONFIG_KVM_BOOKE_HV
329 *spr_val
= vcpu_e500
->pid
[0];
332 *spr_val
= vcpu_e500
->pid
[1];
335 *spr_val
= vcpu_e500
->pid
[2];
338 *spr_val
= vcpu
->arch
.shared
->mas0
;
341 *spr_val
= vcpu
->arch
.shared
->mas1
;
344 *spr_val
= vcpu
->arch
.shared
->mas2
;
347 *spr_val
= (u32
)vcpu
->arch
.shared
->mas7_3
;
350 *spr_val
= vcpu
->arch
.shared
->mas4
;
353 *spr_val
= vcpu
->arch
.shared
->mas6
;
356 *spr_val
= vcpu
->arch
.shared
->mas7_3
>> 32;
360 *spr_val
= vcpu
->arch
.decar
;
363 *spr_val
= vcpu
->arch
.tlbcfg
[0];
366 *spr_val
= vcpu
->arch
.tlbcfg
[1];
369 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
371 *spr_val
= vcpu
->arch
.tlbps
[0];
374 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
376 *spr_val
= vcpu
->arch
.tlbps
[1];
379 *spr_val
= vcpu_e500
->l1csr0
;
382 *spr_val
= vcpu_e500
->l1csr1
;
385 *spr_val
= vcpu_e500
->hid0
;
388 *spr_val
= vcpu_e500
->hid1
;
391 *spr_val
= vcpu_e500
->svr
;
399 *spr_val
= vcpu
->arch
.mmucfg
;
402 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
405 * Legacy Linux guests access EPTCFG register even if the E.PT
406 * category is disabled in the VM. Give them a chance to live.
408 *spr_val
= vcpu
->arch
.eptcfg
;
412 *spr_val
= vcpu
->arch
.pwrmgtcr0
;
415 /* extra exceptions */
416 #ifdef CONFIG_SPE_POSSIBLE
418 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_UNAVAIL
];
421 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_DATA
];
424 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_ROUND
];
427 #ifdef CONFIG_ALTIVEC
429 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL
];
432 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_ASSIST
];
436 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_PERFORMANCE_MONITOR
];
438 #ifdef CONFIG_KVM_BOOKE_HV
440 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL
];
443 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL_CRIT
];
447 emulated
= kvmppc_booke_emulate_mfspr(vcpu
, sprn
, spr_val
);