/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
15 #include <asm/kvm_ppc.h>
16 #include <asm/disassemble.h>
17 #include <asm/dbell.h>
18 #include <asm/reg_booke.h>
23 #define XOP_DCBTLS 166
24 #define XOP_MSGSND 206
25 #define XOP_MSGCLR 238
27 #define XOP_TLBIVAX 786
32 #define XOP_EHPRIV 270
34 #ifdef CONFIG_KVM_E500MC
35 static int dbell2prio(ulong param
)
37 int msg
= param
& PPC_DBELL_TYPE_MASK
;
41 case PPC_DBELL_TYPE(PPC_DBELL
):
42 prio
= BOOKE_IRQPRIO_DBELL
;
44 case PPC_DBELL_TYPE(PPC_DBELL_CRIT
):
45 prio
= BOOKE_IRQPRIO_DBELL_CRIT
;
54 static int kvmppc_e500_emul_msgclr(struct kvm_vcpu
*vcpu
, int rb
)
56 ulong param
= vcpu
->arch
.gpr
[rb
];
57 int prio
= dbell2prio(param
);
62 clear_bit(prio
, &vcpu
->arch
.pending_exceptions
);
66 static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu
*vcpu
, int rb
)
68 ulong param
= vcpu
->arch
.gpr
[rb
];
69 int prio
= dbell2prio(rb
);
70 int pir
= param
& PPC_DBELL_PIR_MASK
;
72 struct kvm_vcpu
*cvcpu
;
77 kvm_for_each_vcpu(i
, cvcpu
, vcpu
->kvm
) {
78 int cpir
= cvcpu
->arch
.shared
->pir
;
79 if ((param
& PPC_DBELL_MSG_BRDCAST
) || (cpir
== pir
)) {
80 set_bit(prio
, &cvcpu
->arch
.pending_exceptions
);
89 static int kvmppc_e500_emul_ehpriv(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
90 unsigned int inst
, int *advance
)
92 int emulated
= EMULATE_DONE
;
94 switch (get_oc(inst
)) {
96 run
->exit_reason
= KVM_EXIT_DEBUG
;
97 run
->debug
.arch
.address
= vcpu
->arch
.pc
;
98 run
->debug
.arch
.status
= 0;
99 kvmppc_account_exit(vcpu
, DEBUG_EXITS
);
100 emulated
= EMULATE_EXIT_USER
;
104 emulated
= EMULATE_FAIL
;
109 static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu
*vcpu
)
111 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
113 /* Always fail to lock the cache */
114 vcpu_e500
->l1csr0
|= L1CSR0_CUL
;
118 static int kvmppc_e500_emul_mftmr(struct kvm_vcpu
*vcpu
, unsigned int inst
,
121 /* Expose one thread per vcpu */
122 if (get_tmrn(inst
) == TMRN_TMCFG0
) {
123 kvmppc_set_gpr(vcpu
, rt
,
124 1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT
));
131 int kvmppc_core_emulate_op_e500(struct kvm_run
*run
, struct kvm_vcpu
*vcpu
,
132 unsigned int inst
, int *advance
)
134 int emulated
= EMULATE_DONE
;
135 int ra
= get_ra(inst
);
136 int rb
= get_rb(inst
);
137 int rt
= get_rt(inst
);
140 switch (get_op(inst
)) {
142 switch (get_xop(inst
)) {
145 emulated
= kvmppc_e500_emul_dcbtls(vcpu
);
148 #ifdef CONFIG_KVM_E500MC
150 emulated
= kvmppc_e500_emul_msgsnd(vcpu
, rb
);
154 emulated
= kvmppc_e500_emul_msgclr(vcpu
, rb
);
159 emulated
= kvmppc_e500_emul_tlbre(vcpu
);
163 emulated
= kvmppc_e500_emul_tlbwe(vcpu
);
167 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
168 emulated
= kvmppc_e500_emul_tlbsx(vcpu
, ea
);
173 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
174 emulated
= kvmppc_e500_emul_tlbilx(vcpu
, type
, ea
);
179 ea
= kvmppc_get_ea_indexed(vcpu
, ra
, rb
);
180 emulated
= kvmppc_e500_emul_tlbivax(vcpu
, ea
);
184 emulated
= kvmppc_e500_emul_mftmr(vcpu
, inst
, rt
);
188 emulated
= kvmppc_e500_emul_ehpriv(run
, vcpu
, inst
,
193 emulated
= EMULATE_FAIL
;
199 emulated
= EMULATE_FAIL
;
202 if (emulated
== EMULATE_FAIL
)
203 emulated
= kvmppc_booke_emulate_op(run
, vcpu
, inst
, advance
);
208 int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu
*vcpu
, int sprn
, ulong spr_val
)
210 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
211 int emulated
= EMULATE_DONE
;
214 #ifndef CONFIG_KVM_BOOKE_HV
216 kvmppc_set_pid(vcpu
, spr_val
);
221 vcpu_e500
->pid
[1] = spr_val
;
226 vcpu_e500
->pid
[2] = spr_val
;
229 vcpu
->arch
.shared
->mas0
= spr_val
;
232 vcpu
->arch
.shared
->mas1
= spr_val
;
235 vcpu
->arch
.shared
->mas2
= spr_val
;
238 vcpu
->arch
.shared
->mas7_3
&= ~(u64
)0xffffffff;
239 vcpu
->arch
.shared
->mas7_3
|= spr_val
;
242 vcpu
->arch
.shared
->mas4
= spr_val
;
245 vcpu
->arch
.shared
->mas6
= spr_val
;
248 vcpu
->arch
.shared
->mas7_3
&= (u64
)0xffffffff;
249 vcpu
->arch
.shared
->mas7_3
|= (u64
)spr_val
<< 32;
253 vcpu_e500
->l1csr0
= spr_val
;
254 vcpu_e500
->l1csr0
&= ~(L1CSR0_DCFI
| L1CSR0_CLFC
);
257 vcpu_e500
->l1csr1
= spr_val
;
258 vcpu_e500
->l1csr1
&= ~(L1CSR1_ICFI
| L1CSR1_ICLFR
);
261 vcpu_e500
->hid0
= spr_val
;
264 vcpu_e500
->hid1
= spr_val
;
268 emulated
= kvmppc_e500_emul_mt_mmucsr0(vcpu_e500
,
274 * Guest relies on host power management configurations
275 * Treat the request as a general store
277 vcpu
->arch
.pwrmgtcr0
= spr_val
;
280 /* extra exceptions */
281 #ifdef CONFIG_SPE_POSSIBLE
283 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_UNAVAIL
] = spr_val
;
286 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_DATA
] = spr_val
;
289 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_ROUND
] = spr_val
;
292 #ifdef CONFIG_ALTIVEC
294 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL
] = spr_val
;
297 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_ASSIST
] = spr_val
;
301 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_PERFORMANCE_MONITOR
] = spr_val
;
303 #ifdef CONFIG_KVM_BOOKE_HV
305 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL
] = spr_val
;
308 vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL_CRIT
] = spr_val
;
312 emulated
= kvmppc_booke_emulate_mtspr(vcpu
, sprn
, spr_val
);
318 int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu
*vcpu
, int sprn
, ulong
*spr_val
)
320 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
321 int emulated
= EMULATE_DONE
;
324 #ifndef CONFIG_KVM_BOOKE_HV
326 *spr_val
= vcpu_e500
->pid
[0];
329 *spr_val
= vcpu_e500
->pid
[1];
332 *spr_val
= vcpu_e500
->pid
[2];
335 *spr_val
= vcpu
->arch
.shared
->mas0
;
338 *spr_val
= vcpu
->arch
.shared
->mas1
;
341 *spr_val
= vcpu
->arch
.shared
->mas2
;
344 *spr_val
= (u32
)vcpu
->arch
.shared
->mas7_3
;
347 *spr_val
= vcpu
->arch
.shared
->mas4
;
350 *spr_val
= vcpu
->arch
.shared
->mas6
;
353 *spr_val
= vcpu
->arch
.shared
->mas7_3
>> 32;
357 *spr_val
= vcpu
->arch
.decar
;
360 *spr_val
= vcpu
->arch
.tlbcfg
[0];
363 *spr_val
= vcpu
->arch
.tlbcfg
[1];
366 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
368 *spr_val
= vcpu
->arch
.tlbps
[0];
371 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
373 *spr_val
= vcpu
->arch
.tlbps
[1];
376 *spr_val
= vcpu_e500
->l1csr0
;
379 *spr_val
= vcpu_e500
->l1csr1
;
382 *spr_val
= vcpu_e500
->hid0
;
385 *spr_val
= vcpu_e500
->hid1
;
388 *spr_val
= vcpu_e500
->svr
;
396 *spr_val
= vcpu
->arch
.mmucfg
;
399 if (!has_feature(vcpu
, VCPU_FTR_MMU_V2
))
402 * Legacy Linux guests access EPTCFG register even if the E.PT
403 * category is disabled in the VM. Give them a chance to live.
405 *spr_val
= vcpu
->arch
.eptcfg
;
409 *spr_val
= vcpu
->arch
.pwrmgtcr0
;
412 /* extra exceptions */
413 #ifdef CONFIG_SPE_POSSIBLE
415 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_UNAVAIL
];
418 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_DATA
];
421 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_SPE_FP_ROUND
];
424 #ifdef CONFIG_ALTIVEC
426 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL
];
429 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_ALTIVEC_ASSIST
];
433 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_PERFORMANCE_MONITOR
];
435 #ifdef CONFIG_KVM_BOOKE_HV
437 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL
];
440 *spr_val
= vcpu
->arch
.ivor
[BOOKE_IRQPRIO_DBELL_CRIT
];
444 emulated
= kvmppc_booke_emulate_mfspr(vcpu
, sprn
, spr_val
);