/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
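
/*
 * Offsets into struct kvm_cpu_context; the CPU_* constants below come from
 * asm-offsets.c.
 */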
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"
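
/*
 * Everything below lives in the .hyp.text section and runs at EL2; kernel
 * pointers handed to this code must be converted with kern_hyp_va before
 * they are dereferenced.
 */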
.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm
.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]
	msr	sp_el0, x19
	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm
.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm
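
// fpsimd_save/fpsimd_restore (asm/fpsimdmacros.h) transfer the Q registers
// and FPSR/FPCR between the CPU and the CPU_FP_REGS area of the context.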
.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm
.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm
/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x18, contextidr_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]
.endm
.macro save_debug
	// x2: base address for cpu context
	// x3: tmp register

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip
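
	// ID_AA64DFR0_EL1.BRPs/WRPs encode "number implemented minus one", so
	// x24/x25 end up holding how many of the 16 architected breakpoint/
	// watchpoint register slots are not implemented and can be skipped.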
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)

	add	x26, x26, x24, lsl #2

	mrs	x20, dbgbcr15_el1
	mrs	x19, dbgbcr14_el1
	mrs	x18, dbgbcr13_el1
	mrs	x17, dbgbcr12_el1
	mrs	x16, dbgbcr11_el1
	mrs	x15, dbgbcr10_el1

	add	x26, x26, x24, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)

	add	x26, x26, x24, lsl #2

	mrs	x20, dbgbvr15_el1
	mrs	x19, dbgbvr14_el1
	mrs	x18, dbgbvr13_el1
	mrs	x17, dbgbvr12_el1
	mrs	x16, dbgbvr11_el1
	mrs	x15, dbgbvr10_el1

	add	x26, x26, x24, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)

	add	x26, x26, x25, lsl #2

	mrs	x20, dbgwcr15_el1
	mrs	x19, dbgwcr14_el1
	mrs	x18, dbgwcr13_el1
	mrs	x17, dbgwcr12_el1
	mrs	x16, dbgwcr11_el1
	mrs	x15, dbgwcr10_el1

	add	x26, x26, x25, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)

	add	x26, x26, x25, lsl #2

	mrs	x20, dbgwvr15_el1
	mrs	x19, dbgwvr14_el1
	mrs	x18, dbgwvr13_el1
	mrs	x17, dbgwvr12_el1
	mrs	x16, dbgwvr11_el1
	mrs	x15, dbgwvr10_el1

	add	x26, x26, x25, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]
	mrs	x21, mdccint_el1
	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.endm
.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]
	msr	contextidr_el1, x18
.endm
.macro restore_debug
	// x2: base address for cpu context
	// x3: tmp register

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
	add	x26, x26, x24, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x24, lsl #2

	msr	dbgbcr15_el1, x20
	msr	dbgbcr14_el1, x19
	msr	dbgbcr13_el1, x18
	msr	dbgbcr12_el1, x17
	msr	dbgbcr11_el1, x16
	msr	dbgbcr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)

	add	x26, x26, x24, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x24, lsl #2

	msr	dbgbvr15_el1, x20
	msr	dbgbvr14_el1, x19
	msr	dbgbvr13_el1, x18
	msr	dbgbvr12_el1, x17
	msr	dbgbvr11_el1, x16
	msr	dbgbvr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)

	add	x26, x26, x25, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x25, lsl #2

	msr	dbgwcr15_el1, x20
	msr	dbgwcr14_el1, x19
	msr	dbgwcr13_el1, x18
	msr	dbgwcr12_el1, x17
	msr	dbgwcr11_el1, x16
	msr	dbgwcr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)

	add	x26, x26, x25, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x25, lsl #2

	msr	dbgwvr15_el1, x20
	msr	dbgwvr14_el1, x19
	msr	dbgwvr13_el1, x18
	msr	dbgwvr12_el1, x17
	msr	dbgwvr11_el1, x16
	msr	dbgwvr10_el1, x15
	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	msr	mdccint_el1, x21
.endm
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm
.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm
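
	// ID_PFR0_EL1.State3 (bits [15:12]) is non-zero only when ThumbEE is
	// implemented, hence the test on bit 12 above.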
.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm
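
	// Debug registers are switched lazily: unless the guest has touched
	// them (KVM_ARM64_DEBUG_DIRTY is set), the full save/restore of the
	// hardware breakpoint/watchpoint state is skipped and guest accesses
	// trap instead.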
.macro compute_debug_state target
	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	add	x25, x0, #VCPU_CONTEXT

	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE
	adds	xzr, x25, x26
	b.eq	9998f		// Nothing to see there

	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f		// Don't skip restore

9998:
	// Otherwise load the flags from memory in case we recently
	// trapped
	skip_debug_state x25, \target
9999:
.endm
.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)

	skip_debug_state x8, 2f
2:
	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
1:
.endm
.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)

	skip_debug_state x8, 2f
2:
	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
1:
.endm
.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]
	msr	hcr_el2, x2
	ldr	x2, =(CPTR_EL2_TTA)
	msr	cptr_el2, x2

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	orr	x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)

	// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
	// if not dirty.
	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]
	tbnz	x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
	orr	x2, x2, #MDCR_EL2_TDA
1:
	msr	mdcr_el2, x2
.endm
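
	// The MDCR_EL2 value built above keeps HPMN, traps PMU accesses
	// (TPM/TPMCR) and debug ROM/OS register accesses (TDRA/TDOSA); TDA
	// additionally traps the remaining debug registers whenever the lazy
	// debug state is not in use.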
.macro deactivate_traps
	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm
/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, VGIC_SAVE_FN]
	kern_hyp_va	x24
	blr	x24
	mov	x25, #HCR_INT_OVERRIDE
.endm
/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	mrs	x24, hcr_el2
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
	orr	x24, x24, x25
	msr	hcr_el2, x24
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, #VGIC_RESTORE_FN]
	kern_hyp_va	x24
	blr	x24
.endm
.macro save_timer_state
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm
.macro restore_timer_state
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	msr	cntv_ctl_el0, x2
1:
.endm
/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
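
/*
 * The host reaches this via kvm_call_hyp(__kvm_vcpu_run, vcpu), i.e. through
 * the HVC handled by el1_sync further down.
 */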
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	compute_debug_state 1f
	bl	__save_debug
1:
	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	skip_debug_state x3, 1f
	bl	__restore_debug
1:
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret
__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	skip_debug_state x3, 1f
	bl	__save_debug
1:
	save_guest_32bit_state

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
	bl	__restore_debug
1:
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	lsr	x1, x1, #12
	tlbi	ipas2e1is, x1

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb	ish
	tlbi	vmalle1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)
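
/*
 * __kvm_flush_vm_context invalidates all Stage-2 related TLB entries (and
 * the icache) across the Inner Shareable domain; it is used when the VMID
 * generation rolls over.
 */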
ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	ish
	isb
	ret
ENDPROC(__kvm_flush_vm_context)
// struct vgic_sr_vectors __vgic_sr_vectors;
	.align 3
ENTRY(__vgic_sr_vectors)
	.skip	VGIC_SR_VECTOR_SZ
ENDPROC(__vgic_sr_vectors)
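
// The save/restore slots above are filled in at init time by the host vgic
// code with the GICv2 or GICv3 backend entry points.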
__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

1:	adr	x0, __hyp_panic_str

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)
__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  The return value
 * is passed back in x0.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
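
/* Typical use: kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); */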
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)
.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic
el1_sync:					// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	/* Check for __hyp_get_vectors */

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
el1_trap:
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1
	eret
el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return
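
/*
 * The EL2 exception vector table: sixteen 128-byte slots, four exception
 * types (Synchronous, IRQ, FIQ, Error) for each of EL2t, EL2h, 64-bit EL1
 * and 32-bit EL1.
 */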
	.align	11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

	.popsection