/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

__asm__(".arch_extension	virt");
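
/*
 * The ".arch_extension virt" directive above lets the inline assembly in
 * this file use Virtualization Extensions instructions (for example the
 * banked-register MRS forms behind read_special()) without the assembler
 * rejecting them.
 */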
/*
 * Activate the traps, saving the host's fpexc register before
 * overwriting it. We'll restore it on VM exit.
 */
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
{
	u32 val;

	/*
	 * We are about to set HCPTR.TCP10/11 to trap all floating point
	 * register accesses to HYP, however, the ARM ARM clearly states that
	 * traps are only taken to HYP if the operation would not otherwise
	 * trap to SVC.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
	 */
	val = read_sysreg(VFP_FPEXC);
	*fpexc_host = val;
	if (!(val & FPEXC_EN)) {
		write_sysreg(val | FPEXC_EN, VFP_FPEXC);
		isb();
	}

	write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(HSTR_T(15), HSTR);
	write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
	val = read_sysreg(HDCR);
	val |= HDCR_TPM | HDCR_TPMCR;			/* trap performance monitors */
	val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA;	/* trap debug regs */
	write_sysreg(val, HDCR);
}
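
/*
 * Undo the HYP trap configuration on the way back to the host: clear HCR,
 * HSTR and HCPTR and drop the performance-monitor traps from HDCR. A pending
 * virtual abort is preserved in vcpu->arch.hcr so it is not lost when HCR is
 * cleared.
 */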
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	u32 val;

	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See B1.9.9 (Virtual Abort exception) for details,
	 * but the crucial bit is the zeroing of HCR.VA in the
	 * pseudocode.
	 */
	if (vcpu->arch.hcr & HCR_VA)
		vcpu->arch.hcr = read_sysreg(HCR);

	write_sysreg(0, HCR);
	write_sysreg(0, HSTR);
	val = read_sysreg(HDCR);
	write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
	write_sysreg(0, HCPTR);
}
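
/* Install the guest's stage-2 translation (VTTBR) and its virtual MIDR. */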
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	write_sysreg(kvm->arch.vttbr, VTTBR);
	write_sysreg(vcpu->arch.midr, VPIDR);
}
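
/* Back to the host view: clear VTTBR and restore the physical MIDR in VPIDR. */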
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, VTTBR);
	write_sysreg(read_sysreg(MIDR), VPIDR);
}
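
/*
 * The vgic helpers below pick the GICv3 CPU interface code when the static
 * key says one is in use, and fall back to the GICv2 interface otherwise.
 */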
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);
}
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}
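
/*
 * Capture the fault state (HSR, the faulting virtual address and, when it is
 * valid, the IPA for HPFAR) into vcpu->arch.fault so the host can decode the
 * exit. Returns false when the fault should simply be retried in the guest.
 */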
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u32 hsr = read_sysreg(HSR);
	u8 ec = hsr >> HSR_EC_SHIFT;
	u32 hpfar, far;

	vcpu->arch.fault.hsr = hsr;

	if (ec == HSR_EC_IABT)
		far = read_sysreg(HIFAR);
	else if (ec == HSR_EC_DABT)
		far = read_sysreg(HDFAR);
	else
		return true;

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */
	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
		u64 par, tmp;

		par = read_sysreg(PAR);
		write_sysreg(far, ATS1CPR);
		isb();

		tmp = read_sysreg(PAR);
		write_sysreg(par, PAR);

		if (unlikely(tmp & 1))
			return false; /* Translation failed, back to guest */

		hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
	} else {
		hpfar = read_sysreg(HPFAR);
	}

	vcpu->arch.fault.hxfar = far;
	vcpu->arch.fault.hpfar = hpfar;
	return true;
}
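
/*
 * The world switch: save the host context, activate the guest's traps and
 * stage-2 translation, restore the guest context, run it via __guest_enter(),
 * then undo everything on exit and hand the exit code back to the host.
 */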
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;
	u32 fpexc;

	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, HTPIDR);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state(host_ctxt);
	__banked_save_state(host_ctxt);

	__activate_traps(vcpu, &fpexc);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__sysreg_restore_state(guest_ctxt);
	__banked_restore_state(guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
		goto again;

	fp_enabled = __vfp_enabled();

	__banked_save_state(guest_ctxt);
	__sysreg_save_state(guest_ctxt);
	__timer_disable_traps(vcpu);

	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__banked_restore_state(host_ctxt);
	__sysreg_restore_state(host_ctxt);

	if (fp_enabled) {
		__vfp_save_state(&guest_ctxt->vfp);
		__vfp_restore_state(&host_ctxt->vfp);
	}

	write_sysreg(fpexc, VFP_FPEXC);

	return exit_code;
}
static const char * const __hyp_panic_string[] = {
	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_SOFTWARE]   = "\nHYP panic: SVC PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
	[ARM_EXCEPTION_IRQ]        = "\nHYP panic: IRQ PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_FIQ]        = "\nHYP panic: FIQ PC:%08x CPSR:%08x",
	[ARM_EXCEPTION_HVC]        = "\nHYP panic: HVC PC:%08x CPSR:%08x",
};
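
/*
 * Panic while at HYP: if a guest was running (VTTBR non-zero), put the host
 * context back first, then pass the formatted message, ELR_hyp and either the
 * fault address (data aborts) or SPSR to __hyp_do_panic().
 */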
void __hyp_text __noreturn __hyp_panic(int cause)
{
	u32 elr = read_special(ELR_hyp);
	u32 val;

	if (cause == ARM_EXCEPTION_DATA_ABORT)
		val = read_sysreg(HDFAR);
	else
		val = read_special(SPSR);

	if (read_sysreg(VTTBR)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__banked_restore_state(host_ctxt);
		__sysreg_restore_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_do_panic(__hyp_panic_string[cause], elr, val);

	unreachable();
}