/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
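// The CPU_* base offsets used above (CPU_GP_REGS, CPU_USER_PT_REGS, CPU_SPSR,
// CPU_SYSREGS) are generated at build time by asm-offsets.c from the layout of
// struct kvm_cpu_context. Each saved register occupies one 64-bit slot, hence
// the 8*x scaling when indexing an individual xN, SPSR or system register
// within the context.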
	.pushsection	.hyp.text, "ax"

	.globl __kvm_hyp_code_start

.macro save_common_regs
	// x2: base address for cpu context

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.macro restore_common_regs
	// x2: base address for cpu context

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0

	msr	elr_el2, x20			// EL1 PC
	msr	spsr_el2, x21			// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.macro restore_host_regs
	restore_common_regs

.macro save_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
.macro restore_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x6, x7, [x3, #16]
.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack

	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]

	// x19-x29, lr, sp*, elr*, spsr*

	// Last bits of the 64bit state
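	// (The instructions elided here pop the guest's x0-x3, which were
	// staged on the stack by the "Prepare x0-x3 for later restore"
	// sequence above; they must go last because x0-x3 are still in use
	// as scratch until this point.)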
	// Do not touch any register after this!
/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you're doing.
 */
.macro save_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x18, contextidr_el1

	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
.macro restore_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]

	msr	contextidr_el1, x18
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target	// bit 12 of the T32EE field ([15:12]): clear means no ThumbEE
.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	stp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
.macro activate_traps
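	// Configure the guest's trap regime: HCR_EL2 takes the vcpu's HCR
	// value OR'ed with any pending virtual interrupt lines, CPTR_EL2
	// traps trace (CPTR_EL2_TTA), HSTR_EL2 traps CP15 c15 accesses, and
	// MDCR_EL2 traps PMU accesses while preserving the HPMN field.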
	ldr	x2, [x0, #VCPU_IRQ_LINES]
	ldr	x1, [x0, #VCPU_HCR_EL2]

	ldr	x2, =(CPTR_EL2_TTA)

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15

	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
.macro deactivate_traps
	and	x2, x2, #MDCR_EL2_HPMN_MASK

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	ldr	x2, [x1, #KVM_VTTBR]
/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
.macro save_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr	w4, [x2, #GICH_HCR]
	ldr	w5, [x2, #GICH_VMCR]
	ldr	w6, [x2, #GICH_MISR]
	ldr	w7, [x2, #GICH_EISR0]
	ldr	w8, [x2, #GICH_EISR1]
	ldr	w9, [x2, #GICH_ELRSR0]
	ldr	w10, [x2, #GICH_ELRSR1]
	ldr	w11, [x2, #GICH_APR]

	str	w4, [x3, #VGIC_CPU_HCR]
	str	w5, [x3, #VGIC_CPU_VMCR]
	str	w6, [x3, #VGIC_CPU_MISR]
	str	w7, [x3, #VGIC_CPU_EISR]
	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
	str	w9, [x3, #VGIC_CPU_ELRSR]
	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
	str	w11, [x3, #VGIC_CPU_APR]

	str	wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
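	// Illustrative sketch of the list-register copy loop that is elided
	// here; the label numbers and the w5 scratch register are assumptions,
	// not verbatim from the original:
1:	ldr	w5, [x2], #4		// read GICH_LRn
	str	w5, [x3], #4		// store into vgic_cpu->vgic_lr[]
	sub	w4, w4, #1
	cbnz	w4, 1b
2: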
/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro restore_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr	w4, [x3, #VGIC_CPU_HCR]
	ldr	w5, [x3, #VGIC_CPU_VMCR]
	ldr	w6, [x3, #VGIC_CPU_APR]

	str	w4, [x2, #GICH_HCR]
	str	w5, [x2, #GICH_VMCR]
	str	w6, [x2, #GICH_APR]
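	// Only HCR, VMCR and APR need to be written back: the other GICH
	// registers saved above (MISR, EISR, ELRSR) are read-only status
	// registers that the hardware recomputes, and the list registers are
	// reloaded just below.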
	/* Restore list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
.macro save_timer_state
	ldr	x2, [x0, #VCPU_KVM]
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

	// Allow physical timer/counter access for the host

	// Clear cntvoff for the host
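	// Illustrative sketch of what the two steps above amount to (the
	// scratch register is an assumption): set CNTHCTL_EL2.EL1PCTEN and
	// EL1PCEN so the host regains physical timer/counter access, and
	// clear the virtual counter offset.
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3		// EL1PCEN | EL1PCTEN
	msr	cnthctl_el2, x2
	msr	cntvoff_el2, xzr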
.macro restore_timer_state
	// Disallow physical timer access for the guest
	// Physical counter access is allowed

	ldr	x2, [x0, #VCPU_KVM]
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
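// The host side typically reaches this entry point with something like
// (illustrative, from the shared ARM KVM code):
//
//	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
//
// kvm_call_hyp() issues an HVC, which lands in the el1_sync handler below;
// that handler spots the host-originated HVC and branches here at EL2.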
ENTRY(__kvm_vcpu_run)
	msr	tpidr_el2, x0	// Save the vcpu register

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	add	x2, x0, #VCPU_CONTEXT

	restore_guest_32bit_state
	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack
	add	x2, x0, #VCPU_CONTEXT

	save_guest_32bit_state

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	ldr	x2, [x0, #KVM_VTTBR]
	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
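	// Illustrative sketch of that invalidation (the exact barrier choice
	// is an assumption, not verbatim from the original):
	lsr	x1, x1, #12		// IPA page number, as TLBI IPAS2E1IS expects
	tlbi	ipas2e1is, x1		// Stage-2, this IPA, Inner Shareable
	dsb	sy
	tlbi	vmalle1is		// ...and the whole of Stage-1
	dsb	sy
	isb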
ENDPROC(__kvm_tlb_flush_vmid_ipa)

ENTRY(__kvm_flush_vm_context)
ENDPROC(__kvm_flush_vm_context)
__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

1:	adr	x0, __hyp_panic_str
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
2:	.quad HYP_PAGE_OFFSET

ENDPROC(__kvm_hyp_panic)
__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)
.macro invalid_vector	label, target
	.align	2
\label:
	b	\target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic
el1_sync:					// Guest trapped into EL2

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */
	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
el1_trap:
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f			// Not an abort we care about
	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f			// Not a permission fault
	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f		// S1PTW is set
	/* Preserve PAR_EL1 */
	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
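	// Illustrative sketch of that lookup (the register choice is an
	// assumption; FAR_EL2 holds the faulting guest VA at this point):
	mrs	x3, far_el2
	at	s1e1r, x3		// Stage-1 EL1 read translation of the guest VA
	isb
	mrs	x3, par_el1		// translation result (or fault) lands in PAR_EL1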
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
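	// PAR_EL1 returns the output address in bits [47:12]; HPFAR_EL2
	// expects IPA[47:12] in bits [39:4], hence the 36-bit extract from
	// bit 12 followed by the left shift of 4.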
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
el1_irq:
	mov	x1, #ARM_EXCEPTION_IRQ
ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
	.globl __kvm_hyp_code_end