/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
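
/*
 * For orientation, a rough C sketch of the layout these offsets index into
 * (the constants above come from asm-offsets.c; field names follow the
 * arm64 KVM headers, but treat this as an illustration rather than the
 * authoritative definition):
 *
 *	struct kvm_regs {
 *		struct user_pt_regs regs;	// x0-x30, sp (SP_EL0), pc, pstate
 *		__u64 sp_el1;
 *		__u64 elr_el1;
 *		__u64 spsr[KVM_NR_SPSR];	// indexed by KVM_SPSR_*
 *		struct user_fpsimd_state fp_regs;
 *	};
 *
 *	struct kvm_cpu_context {
 *		struct kvm_regs gp_regs;
 *		union {
 *			u64 sys_regs[NR_SYS_REGS];
 *			u32 copro[NR_COPRO_REGS];
 *		};
 *	};
 */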
	.pushsection	.hyp.text, "ax"

.macro save_common_regs
	// x2: base address for cpu context

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x20, elr_el2		// pc before entering el2
	mrs	x21, spsr_el2		// pstate before entering el2

	stp	x19, x20, [x3, #96]

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

.macro restore_common_regs
	// x2: base address for cpu context

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0

	msr	elr_el2, x20			// pc on return from el2
	msr	spsr_el2, x21			// pstate on return from el2

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]

.macro restore_host_regs
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)

.macro restore_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x6, x7, [x3, #16]

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack

	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]

	// x19-x29, lr, sp*, elr*, spsr*

	// Last bits of the 64bit state

	// Do not touch any register after this!

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you're doing.
 */
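
/*
 * To make the constraint concrete, here is the save side in rough C
 * (illustrative only; the real index values live in kvm_asm.h and the
 * mrs() helper is a placeholder for the actual instructions):
 *
 *	u64 *dst = &ctxt->sys_regs[MPIDR_EL1];	// x3 points here
 *
 *	dst[0]  = mrs(...);			// x4: first register of the block
 *	dst[1]  = mrs(...);			// x5
 *	// ...
 *	dst[14] = mrs(contextidr_el1);		// x18, so CONTEXTIDR_EL1 must be
 *						// MPIDR_EL1 + 14 in the header
 *	// ...
 *	dst[21] = mrs(...);			// x25: last register of the stp block
 *
 * Reordering either the mrs sequence or the sys_regs indices silently
 * breaks this correspondence.
 */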
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x18, contextidr_el1

	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]

.macro save_debug type
	// x4: pointer to register set
	// x5: number of registers to skip

	add	x22, x22, x5, lsl #2

	mrs	x21, \type\()15_el1
	mrs	x20, \type\()14_el1
	mrs	x19, \type\()13_el1
	mrs	x18, \type\()12_el1
	mrs	x17, \type\()11_el1
	mrs	x16, \type\()10_el1
	mrs	x15, \type\()9_el1
	mrs	x14, \type\()8_el1
	mrs	x13, \type\()7_el1
	mrs	x12, \type\()6_el1
	mrs	x11, \type\()5_el1
	mrs	x10, \type\()4_el1
	mrs	x9, \type\()3_el1
	mrs	x8, \type\()2_el1
	mrs	x7, \type\()1_el1
	mrs	x6, \type\()0_el1

	add	x22, x22, x5, lsl #2

	str	x21, [x4, #(15 * 8)]
	str	x20, [x4, #(14 * 8)]
	str	x19, [x4, #(13 * 8)]
	str	x18, [x4, #(12 * 8)]
	str	x17, [x4, #(11 * 8)]
	str	x16, [x4, #(10 * 8)]
	str	x15, [x4, #(9 * 8)]
	str	x14, [x4, #(8 * 8)]
	str	x13, [x4, #(7 * 8)]
	str	x12, [x4, #(6 * 8)]
	str	x11, [x4, #(5 * 8)]
	str	x10, [x4, #(4 * 8)]
	str	x9, [x4, #(3 * 8)]
	str	x8, [x4, #(2 * 8)]
	str	x7, [x4, #(1 * 8)]
	str	x6, [x4, #(0 * 8)]

.macro restore_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]

	msr	contextidr_el1, x18

.macro restore_debug type
	// x4: pointer to register set
	// x5: number of registers to skip

	add	x22, x22, x5, lsl #2

	ldr	x21, [x4, #(15 * 8)]
	ldr	x20, [x4, #(14 * 8)]
	ldr	x19, [x4, #(13 * 8)]
	ldr	x18, [x4, #(12 * 8)]
	ldr	x17, [x4, #(11 * 8)]
	ldr	x16, [x4, #(10 * 8)]
	ldr	x15, [x4, #(9 * 8)]
	ldr	x14, [x4, #(8 * 8)]
	ldr	x13, [x4, #(7 * 8)]
	ldr	x12, [x4, #(6 * 8)]
	ldr	x11, [x4, #(5 * 8)]
	ldr	x10, [x4, #(4 * 8)]
	ldr	x9, [x4, #(3 * 8)]
	ldr	x8, [x4, #(2 * 8)]
	ldr	x7, [x4, #(1 * 8)]
	ldr	x6, [x4, #(0 * 8)]

	add	x22, x22, x5, lsl #2

	msr	\type\()15_el1, x21
	msr	\type\()14_el1, x20
	msr	\type\()13_el1, x19
	msr	\type\()12_el1, x18
	msr	\type\()11_el1, x17
	msr	\type\()10_el1, x16
	msr	\type\()9_el1, x15
	msr	\type\()8_el1, x14
	msr	\type\()7_el1, x13
	msr	\type\()6_el1, x12
	msr	\type\()5_el1, x11
	msr	\type\()4_el1, x10
	msr	\type\()3_el1, x9
	msr	\type\()2_el1, x8
	msr	\type\()1_el1, x7
	msr	\type\()0_el1, x6

.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	tbnz	\tmp, #HCR_RW_SHIFT, \target

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target

.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target

/*
 * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
 */
.macro skip_fpsimd_state tmp, target
	tbnz	\tmp, #CPTR_EL2_TFP_SHIFT, \target

.macro compute_debug_state target
	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	// (See the C sketch after this macro.)
	add	x25, x0, #VCPU_CONTEXT

	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE
	b.eq	9998f		// Nothing to see there

	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f		// Don't skip restore

	// Otherwise load the flags from memory in case we recently
	// trapped
	skip_debug_state x25, \target
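
/*
 * A rough C equivalent of the decision above (illustrative; the flag and
 * bit names are the ones from the arm64 KVM and debug-monitors headers):
 *
 *	u64 mdscr = vcpu->arch.ctxt.sys_regs[MDSCR_EL1];
 *
 *	if (mdscr & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
 *		vcpu->arch.debug_flags = KVM_ARM64_DEBUG_DIRTY;
 *	else if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
 *		goto target;		// skip the debug save/restore
 */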
.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)

	skip_fpsimd_state x8, 2f

	skip_debug_state x8, 1f

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)

	skip_debug_state x8, 1f

.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2; however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests
	 * we set FPEXC.EN when setting the TFP bit, to prevent traps to EL1.
	 */
	tbnz	x2, #HCR_RW_SHIFT, 99f	// open code skip_32bit_state

	mov	x2, #CPTR_EL2_TTA
	orr	x2, x2, #CPTR_EL2_TFP

	mov	x2, #(1 << 15)		// Trap CP15 Cr=15

	// Monitor Debug Config - see kvm_arm_setup_debug()
	ldr	x2, [x0, #VCPU_MDCR_EL2]

.macro deactivate_traps
	and	x2, x2, #MDCR_EL2_HPMN_MASK

	ldr	x1, [x0, #VCPU_KVM]
	ldr	x2, [x1, #KVM_VTTBR]

/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	bl	__save_vgic_v2_state
	bl	__save_vgic_v3_state
	mov	x25, #HCR_INT_OVERRIDE

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	bl	__restore_vgic_v2_state
	bl	__restore_vgic_v3_state
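
/*
 * The alternative_if_not blocks above are patched at boot: on CPUs that
 * advertise the system-register GIC CPU interface the GICv3 call is used,
 * otherwise the GICv2 one. In rough C terms (illustrative, not the actual
 * call site):
 *
 *	if (cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF))
 *		__restore_vgic_v3_state(vcpu);
 *	else
 *		__restore_vgic_v2_state(vcpu);
 */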
.macro save_timer_state
	ldr	x2, [x0, #VCPU_KVM]
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

	// Disable the virtual timer
	msr	cntv_ctl_el0, xzr

	// Allow physical timer/counter access for the host

	// Clear cntvoff for the host

.macro restore_timer_state
	// Disallow physical timer access for the guest
	// Physical counter access is allowed

	ldr	x2, [x0, #VCPU_KVM]
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]

	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
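
/*
 * A rough C view of the two timer macros above (read()/write() are
 * placeholders for mrs/msr, field names mirror the VCPU_TIMER_* and
 * KVM_TIMER_* offsets):
 *
 *	// save_timer_state
 *	if (kvm->arch.timer.enabled) {
 *		vcpu->arch.timer_cpu.cntv_ctl  = read(cntv_ctl_el0);
 *		vcpu->arch.timer_cpu.cntv_cval = read(cntv_cval_el0);
 *	}
 *	write(cntv_ctl_el0, 0);		// disable the virtual timer
 *	write(cnthctl_el2, read(cnthctl_el2) | EL1PCTEN | EL1PCEN);
 *	write(cntvoff_el2, 0);		// clear cntvoff for the host
 *
 *	// restore_timer_state: roughly the reverse, with the guest only
 *	// allowed physical counter (not timer) access via cnthctl_el2
 */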
/* Save debug state */
	// x2: ptr to CPU context
	// x3: ptr to debug reg struct
	// x4/x5/x6-22/x24-26: trashed

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	add	x4, x3, #DEBUG_BCR

	add	x4, x3, #DEBUG_BVR

	add	x4, x3, #DEBUG_WCR

	add	x4, x3, #DEBUG_WVR

	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
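
/*
 * The skip computation above in rough C: ID_AA64DFR0_EL1.BRPs/WRPs hold
 * "number implemented minus one", and save/restore_debug always walk 16
 * slots from the top down, so the unimplemented ones must be skipped:
 *
 *	u64 dfr0    = read_id_aa64dfr0();	// placeholder accessor
 *	int brps    = (dfr0 >> 12) & 0xf;	// breakpoints - 1
 *	int wrps    = (dfr0 >> 20) & 0xf;	// watchpoints - 1
 *	int bp_skip = 15 - brps;		// unused DBGB{C,V}Rn_EL1 slots
 *	int wp_skip = 15 - wrps;		// unused DBGW{C,V}Rn_EL1 slots
 */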
/* Restore debug state */
	// x2: ptr to CPU context
	// x3: ptr to debug reg struct
	// x4/x5/x6-22/x24-26: trashed

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	add	x4, x3, #DEBUG_BCR

	add	x4, x3, #DEBUG_BVR

	add	x4, x3, #DEBUG_WCR

	add	x4, x3, #DEBUG_WVR

	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]

	skip_fpsimd_state x3, 1f

	skip_fpsimd_state x3, 1f

switch_to_guest_fpsimd:
	bic	x2, x2, #CPTR_EL2_TFP

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	add	x2, x0, #VCPU_CONTEXT

	skip_32bit_state x3, 1f
	ldr	x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
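
/*
 * Lazy FPSIMD switching, sketched in C: the guest initially runs with
 * CPTR_EL2.TFP set, so its first FP/SIMD access traps to EL2 and lands on
 * switch_to_guest_fpsimd above (names below are illustrative):
 *
 *	// on an ESR_ELx_EC_FP_ASIMD trap from the guest:
 *	cptr_el2 &= ~CPTR_EL2_TFP;	// stop trapping FP/SIMD
 *	save_fpsimd(host_ctxt);		// host state -> VCPU_HOST_CONTEXT
 *	restore_fpsimd(guest_ctxt);	// guest state <- VCPU_CONTEXT
 *	if (is_32bit_guest)
 *		write_fpexc32(guest_ctxt->sys_regs[FPEXC32_EL2]);
 *	return_to_guest();		// re-execute the faulting insn
 */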
/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
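/*
 * The overall flow, as pseudocode (each step corresponds to one of the
 * macros or helpers above; this is only an outline of the code below):
 *
 *	save_host_regs(); save_host_sysregs();
 *	if (debug_dirty) save_host_debug();
 *	activate_traps();
 *	restore_vgic_state(); restore_timer_state();
 *	restore_guest_32bit_state();	// before sysregs (erratum #852523)
 *	restore_guest_sysregs();
 *	if (debug_dirty) restore_guest_debug();
 *	restore_guest_regs();
 *	eret();				// enter the guest
 *	// ...until a trap brings us back to EL2, where the same steps run
 *	// in reverse from __kvm_vcpu_return onwards.
 */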
ENTRY(__kvm_vcpu_run)
	msr	tpidr_el2, x0	// Save the vcpu register

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	compute_debug_state 1f
	add	x3, x0, #VCPU_HOST_DEBUG_STATE

	add	x2, x0, #VCPU_CONTEXT

	// We must restore the 32-bit state before the sysregs, thanks
	// to Cortex-A57 erratum #852523.
	restore_guest_32bit_state

	skip_debug_state x3, 1f
	ldr	x3, [x0, #VCPU_DEBUG_PTR]

	// That's it, no more messing around.

	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	add	x2, x0, #VCPU_CONTEXT

	skip_debug_state x3, 1f
	ldr	x3, [x0, #VCPU_DEBUG_PTR]

	save_guest_32bit_state

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	/* Clear FPSIMD and Trace trapping */

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
	add	x3, x0, #VCPU_HOST_DEBUG_STATE

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	ldr	x2, [x0, #KVM_VTTBR]

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first. (See the C sketch
	 * after this function for the required ordering.)
	 */
ENDPROC(__kvm_tlb_flush_vmid_ipa)
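
/*
 * The ordering requirement above, in C-like pseudocode (the helpers stand
 * in for the corresponding msr/tlbi/dsb/isb instructions):
 *
 *	write_vttbr(kvm->arch.vttbr);	// select the guest's VMID
 *	tlbi_ipas2e1is(ipa >> 12);	// Stage-2 entry for this IPA
 *	dsb(ish);			// must complete before...
 *	tlbi_vmalle1is();		// ...invalidating all of Stage-1
 *	dsb(ish);
 *	isb();
 *	write_vttbr(0);			// back to the host
 */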
/*
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 * @struct kvm *kvm - pointer to kvm structure
 *
 * Invalidates all Stage 1 and 2 TLB entries for current VMID.
 */
ENTRY(__kvm_tlb_flush_vmid)
	ldr	x2, [x0, #KVM_VTTBR]
ENDPROC(__kvm_tlb_flush_vmid)

ENTRY(__kvm_flush_vm_context)
ENDPROC(__kvm_flush_vm_context)

	// Stash PAR_EL1 before corrupting it in __restore_sysregs

	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	/*
	 * Make sure we have a valid host stack, and don't leave junk in the
	 * frame pointer that will give us a misleading host stack unwinding.
	 */
	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]

1:	adr	x0, __hyp_panic_str

	pop	x6, xzr		// active context PAR_EL1

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)

2:	.quad HYP_PAGE_OFFSET

ENDPROC(__kvm_hyp_panic)

	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C way, and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
 * passed in x0 and x1.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp-stub.S.
 */
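
/*
 * Typical host-side call sites look like the following (shown as an
 * illustration of the calling convention described above):
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	kvm_call_hyp(__kvm_flush_vm_context);
 */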
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target

/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:				// Guest trapped into EL2

	lsr	x2, x1, #ESR_ELx_EC_SHIFT

	cmp	x2, #ESR_ELx_EC_HVC64

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */

	/* Check for __hyp_get_vectors */

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */

	/* Guest accessed VFP/SIMD registers, save host, restore guest */
	cmp	x2, #ESR_ELx_EC_FP_ASIMD
	b.eq	switch_to_guest_fpsimd

	cmp	x2, #ESR_ELx_EC_DABT_LOW
	mov	x0, #ESR_ELx_EC_IABT_LOW
	b.ne	1f			// Not an abort we care about

	/* This is an abort. Check for permission fault */
alternative_if_not ARM64_WORKAROUND_834220
	and	x2, x1, #ESR_ELx_FSC_TYPE
	b.ne	1f			// Not a permission fault
	nop				// Use the permission fault path to
	nop				// check for a valid S1 translation,
	nop				// regardless of the ESR value.

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f		// S1PTW is set

	/* Preserve PAR_EL1 */

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */

	pop	x0, xzr			// Restore PAR_EL1 from the stack
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
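
	/*
	 * The two instructions above convert a successful PAR_EL1 value into
	 * HPFAR_EL2 format; in C terms (a sketch of the bit manipulation only):
	 *
	 *	// PAR_EL1 bit 0 clear means the AT lookup succeeded
	 *	u64 ipa   = par & GENMASK(47, 12);	// PA[47:12]
	 *	u64 hpfar = (ipa >> 12) << 4;		// FIPA lives at HPFAR_EL2[39:4]
	 */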
1:	mrs	x3, hpfar_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */

	mov	x1, #ARM_EXCEPTION_IRQ

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

ENTRY(__kvm_get_mdcr_el2)
ENDPROC(__kvm_get_mdcr_el2)