/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
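// Worked example (assuming the usual asm-offsets layout, where the GP
// registers are stored as an array of 8-byte slots inside the CPU context):
//   CPU_XREG_OFFSET(19) == CPU_GP_REGS + CPU_USER_PT_REGS + 8*19
// i.e. the byte offset of x19's save slot, so "add x3, x2, #CPU_XREG_OFFSET(19)"
// points x3 at the x19/x20 pair used by the stp/ldp sequences below.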
	.pushsection	.hyp.text, "ax"

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start
.macro save_common_regs
	// x2: base address for cpu context

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
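// Note on the register split (a reading based on the AAPCS64 calling
// convention, not spelled out in the original comments): for the host,
// x0-x18 may legally be clobbered across the kvm_call_hyp() call, so only
// the callee-saved x19-x29 and lr plus the EL1/EL2 exception-return state
// need saving here; the guest's x0-x18 are handled by save_guest_regs below.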
.macro restore_common_regs
	// x2: base address for cpu context

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0

	msr	elr_el2, x20			// EL1 PC
	msr	spsr_el2, x21			// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.macro restore_host_regs
	restore_common_regs

.macro save_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)

.macro restore_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
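// The FP/SIMD register file (V0-V31, FPSR, FPCR) lives at CPU_FP_REGS inside
// the context; the actual bulk copy is performed by helper macros from
// <asm/fpsimdmacros.h> (elided here), which take the base address computed
// in x3.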
.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x6, x7, [x3, #16]
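// The guest's x0-x3 were pushed by the vector entry code so that it could
// use them as scratch registers; they are popped back into temporaries (pop
// elided above) and stored at CPU_XREG_OFFSET(0), which is why the bulk
// store starts at offset 4.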
.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack

	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]

	// x19-x29, lr, sp*, elr*, spsr*

	// Last bits of the 64bit state

	// Do not touch any register after this!
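// Rationale: at this point the guest's x0-x3 are staged on the stack and
// every other guest register has been restored, so the only remaining steps
// are popping x0-x3 and issuing the eret; touching any register here would
// either leak hypervisor state into the guest or corrupt guest state.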
/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you're doing.
 */
.macro save_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x18, contextidr_el1

	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
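// The stp pairs write the system registers out in exactly the order they
// were read (starting from MPIDR_EL1), two registers per 16-byte slot; this
// slot order has to match the sysreg index order used on the C side, which
// is what the "ordering is critical" warning above refers to.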
.macro restore_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]

	msr	contextidr_el1, x18
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
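// ID_PFR0_EL1 bits [15:12] describe ThumbEE (T32EE) support; the field is
// architecturally either 0b0000 or 0b0001, so testing bit 12 is enough. If
// it is clear, TEECR32_EL1/TEEHBR32_EL1 do not exist and the skip target
// is taken instead of context-switching them.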
.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	stp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
.macro activate_traps
	ldr	x2, [x0, #VCPU_IRQ_LINES]
	ldr	x1, [x0, #VCPU_HCR_EL2]

	ldr	x2, =(CPTR_EL2_TTA)

	ldr	x2, =(1 << 15)			// Trap CP15 Cr=15

	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
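// Summary of what is configured above (the msr writes themselves are
// elided): HCR_EL2 is loaded from the vcpu's HCR value combined with the
// pending virtual interrupt lines, CPTR_EL2.TTA traps trace register
// accesses, HSTR_EL2 bit 15 traps 32-bit CP15 c15 accesses, and MDCR_EL2
// gains TPM/TPMCR to trap guest PMU accesses while keeping the host's HPMN.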
.macro deactivate_traps

	and	x2, x2, #MDCR_EL2_HPMN_MASK

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	ldr	x2, [x1, #KVM_VTTBR]
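// The value read from KVM_VTTBR is the combined VMID + stage-2 pgd base for
// this VM; writing it to VTTBR_EL2 (write elided above) is what switches
// stage-2 translation over to the guest being entered.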
/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
.macro save_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	cbz	x2, 2f				// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr	w4, [x2, #GICH_HCR]
	ldr	w5, [x2, #GICH_VMCR]
	ldr	w6, [x2, #GICH_MISR]
	ldr	w7, [x2, #GICH_EISR0]
	ldr	w8, [x2, #GICH_EISR1]
	ldr	w9, [x2, #GICH_ELRSR0]
	ldr	w10, [x2, #GICH_ELRSR1]
	ldr	w11, [x2, #GICH_APR]

CPU_BE(	rev	w10, w10 )
CPU_BE(	rev	w11, w11 )

	str	w4, [x3, #VGIC_CPU_HCR]
	str	w5, [x3, #VGIC_CPU_VMCR]
	str	w6, [x3, #VGIC_CPU_MISR]
	str	w7, [x3, #VGIC_CPU_EISR]
	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
	str	w9, [x3, #VGIC_CPU_ELRSR]
	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
	str	w11, [x3, #VGIC_CPU_APR]

	str	wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
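// The list registers themselves are copied in a loop (elided here):
// VGIC_CPU_NR_LR holds the number of implemented LRs, and each 32-bit
// GICH_LRn is stored into the vgic_cpu LR array that x3 now points to.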
/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro restore_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	cbz	x2, 2f				// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr	w4, [x3, #VGIC_CPU_HCR]
	ldr	w5, [x3, #VGIC_CPU_VMCR]
	ldr	w6, [x3, #VGIC_CPU_APR]

	str	w4, [x2, #GICH_HCR]
	str	w5, [x2, #GICH_VMCR]
	str	w6, [x2, #GICH_APR]

	/* Restore list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
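// Only GICH_HCR, GICH_VMCR, GICH_APR and the list registers are written
// back; GICH_MISR, GICH_EISR and GICH_ELRSR are read-only status registers
// that the GIC recomputes from the restored LR state.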
.macro save_timer_state
	ldr	x2, [x0, #VCPU_KVM]
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

	// Allow physical timer/counter access for the host

	// Clear cntvoff for the host
.macro restore_timer_state
	// Disallow physical timer access for the guest
	// Physical counter access is allowed

	ldr	x2, [x0, #VCPU_KVM]
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
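// The "physical timer/counter access" comments above refer to
// CNTHCTL_EL2.EL1PCTEN (bit 0, physical counter access) and EL1PCEN
// (bit 1, physical timer access): the guest keeps physical counter reads
// but traps on physical timer programming, the host gets both back, and
// CNTVOFF_EL2 supplies the per-VM virtual counter offset.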
/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
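// __kvm_vcpu_run is reached from the host via kvm_call_hyp(), i.e. through
// an HVC that the el1_sync handler further down recognises as a host call
// and dispatches to the requested hyp function.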
ENTRY(__kvm_vcpu_run)
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	restore_guest_32bit_state

	// That's it, no more messing around.

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_32bit_state

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	ldr	x2, [x0, #KVM_VTTBR]

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
ENDPROC(__kvm_tlb_flush_vmid_ipa)
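// Why Stage-1 has to go too: TLBs may hold combined stage-1+stage-2
// translations tagged by the current VMID, so changing a stage-2 mapping
// can leave stale stage-1 entries behind, and without the guest VA there is
// no way to invalidate them selectively.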
ENTRY(__kvm_flush_vm_context)
ENDPROC(__kvm_flush_vm_context)
__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

1:	adr	x0, __hyp_panic_str

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)

2:	.quad HYP_PAGE_OFFSET

ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)
.macro invalid_vector	label, target
\label:
	b	\target
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic
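// These stubs all divert to __kvm_hyp_panic: they back the EL2t/EL2h and
// the EL1 FIQ/Error slots of the vector table below, none of which should
// ever be taken while this hyp code is running.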
el1_sync:					// Guest trapped into EL2

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
el1_trap:
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f			// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f			// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f		// S1PTW is set

	/* Preserve PAR_EL1 */
	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */

	pop	x0, xzr			// Restore PAR_EL1 from the stack
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
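	// PAR_EL1 layout after the address translation: bit 0 is the F (fault)
	// flag tested above, and on success bits [47:12] hold the output
	// address; ubfx/lsl move PA[47:12] into bits [39:4], matching the
	// HPFAR_EL2 FIPA field format the fault handling code expects.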
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */

el1_irq:
	mov	x1, #ARM_EXCEPTION_IRQ
ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.popsection