 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/debug-monitors.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
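/*
 * For illustration only, not part of the build: the offset macros above
 * just turn a register index into a byte offset into the CPU context
 * structure that the C side describes via asm-offsets. A minimal C
 * sketch of the idea, using a hypothetical layout (the real structures
 * live in the arch headers, and the constants used above are generated
 * by asm-offsets):
 *
 *	#include <stddef.h>
 *	#include <stdint.h>
 *
 *	struct cpu_context_sketch {		// hypothetical layout
 *		uint64_t regs[32];		// x0-x30 + sp
 *		uint64_t spsr[5];		// EL1/abt/und/irq/fiq
 *		uint64_t sys_regs[64];		// saved system registers
 *	};
 *
 *	// Sketch equivalents of the macros above:
 *	#define XREG_OFFSET(n) \
 *		(offsetof(struct cpu_context_sketch, regs) + 8 * (n))
 *	#define SPSR_OFFSET(n) \
 *		(offsetof(struct cpu_context_sketch, spsr) + 8 * (n))
 *	#define SYSREG_OFFSET(n) \
 *		(offsetof(struct cpu_context_sketch, sys_regs) + 8 * (n))
 */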
.pushsection .hyp.text, "ax"

.macro save_common_regs
	// x2: base address for cpu context
	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

.macro restore_common_regs
	// x2: base address for cpu context
	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0

	msr	elr_el2, x20			// EL1 PC
	msr	spsr_el2, x21			// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.macro restore_host_regs

	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)

.macro restore_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x6, x7, [x3, #16]

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack

	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]

	// x19-x29, lr, sp*, elr*, spsr*

	// Last bits of the 64bit state

	// Do not touch any register after this!
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * between {save,restore}_sysregs and {save,restore}_guest_32bit_state.
 *
 * In other words, don't touch any of these unless you know what
 * you're doing.
	// x2: base address for cpu context
	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x18, contextidr_el1

	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]

	// x2: base address for cpu context
	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)

	add	x26, x26, x24, lsl #2

	mrs	x20, dbgbcr15_el1
	mrs	x19, dbgbcr14_el1
	mrs	x18, dbgbcr13_el1
	mrs	x17, dbgbcr12_el1
	mrs	x16, dbgbcr11_el1
	mrs	x15, dbgbcr10_el1

	add	x26, x26, x24, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)

	add	x26, x26, x24, lsl #2

	mrs	x20, dbgbvr15_el1
	mrs	x19, dbgbvr14_el1
	mrs	x18, dbgbvr13_el1
	mrs	x17, dbgbvr12_el1
	mrs	x16, dbgbvr11_el1
	mrs	x15, dbgbvr10_el1

	add	x26, x26, x24, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)

	add	x26, x26, x25, lsl #2

	mrs	x20, dbgwcr15_el1
	mrs	x19, dbgwcr14_el1
	mrs	x18, dbgwcr13_el1
	mrs	x17, dbgwcr12_el1
	mrs	x16, dbgwcr11_el1
	mrs	x15, dbgwcr10_el1

	add	x26, x26, x25, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)

	add	x26, x26, x25, lsl #2

	mrs	x20, dbgwvr15_el1
	mrs	x19, dbgwvr14_el1
	mrs	x18, dbgwvr13_el1
	mrs	x17, dbgwvr12_el1
	mrs	x16, dbgwvr11_el1
	mrs	x15, dbgwvr10_el1

	add	x26, x26, x25, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]

.macro restore_sysregs
	// x2: base address for cpu context
	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]
	msr	contextidr_el1, x18

	// x2: base address for cpu context
	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)

	add	x26, x26, x24, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x24, lsl #2

	msr	dbgbcr15_el1, x20
	msr	dbgbcr14_el1, x19
	msr	dbgbcr13_el1, x18
	msr	dbgbcr12_el1, x17
	msr	dbgbcr11_el1, x16
	msr	dbgbcr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)

	add	x26, x26, x24, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]
	add	x26, x26, x24, lsl #2

	msr	dbgbvr15_el1, x20
	msr	dbgbvr14_el1, x19
	msr	dbgbvr13_el1, x18
	msr	dbgbvr12_el1, x17
	msr	dbgbvr11_el1, x16
	msr	dbgbvr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)

	add	x26, x26, x25, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x25, lsl #2

	msr	dbgwcr15_el1, x20
	msr	dbgwcr14_el1, x19
	msr	dbgwcr13_el1, x18
	msr	dbgwcr12_el1, x17
	msr	dbgwcr11_el1, x16
	msr	dbgwcr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)

	add	x26, x26, x25, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x25, lsl #2

	msr	dbgwvr15_el1, x20
	msr	dbgwvr14_el1, x19
	msr	dbgwvr13_el1, x18
	msr	dbgwvr12_el1, x17
	msr	dbgwvr11_el1, x16
	msr	dbgwvr10_el1, x15

	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	tbnz	\tmp, #HCR_RW_SHIFT, \target

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target

.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.macro compute_debug_state target
	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	add	x25, x0, #VCPU_CONTEXT

	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE

	b.eq	9998f		// Nothing to see there

	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f		// Don't skip restore

	// Otherwise load the flags from memory in case we recently trapped
	skip_debug_state x25, \target
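/*
 * For illustration only, not part of the build: the decision made by
 * compute_debug_state, written as C. Types and field names below are a
 * sketch, not the kernel's; the bit positions follow MDSCR_EL1 (KDE is
 * bit 13, MDE is bit 15).
 *
 *	#include <stdint.h>
 *
 *	#define MDSCR_KDE	(UINT64_C(1) << 13)
 *	#define MDSCR_MDE	(UINT64_C(1) << 15)
 *	#define DEBUG_DIRTY	1
 *
 *	struct vcpu_sketch {
 *		uint64_t mdscr_el1;	// saved guest MDSCR_EL1 (hypothetical field)
 *		uint64_t debug_flags;	// VCPU_DEBUG_FLAGS word (hypothetical field)
 *	};
 *
 *	static void compute_debug_state_sketch(struct vcpu_sketch *vcpu)
 *	{
 *		// Full debug save/restore (and no trapping) if the guest
 *		// enabled debug via KDE/MDE...
 *		if (vcpu->mdscr_el1 & (MDSCR_KDE | MDSCR_MDE))
 *			vcpu->debug_flags = DEBUG_DIRTY;
 *		// ...otherwise keep whatever flag an earlier debug trap set.
 *	}
 */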
.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)

	skip_debug_state x8, 2f

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)

	skip_debug_state x8, 2f

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)

.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]

	mov	x2, #CPTR_EL2_TTA

	mov	x2, #(1 << 15)	// Trap CP15 Cr=15

	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	orr	x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
	// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap if not dirty
	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]
	tbnz	x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
	orr	x2, x2, #MDCR_EL2_TDA

.macro deactivate_traps
	and	x2, x2, #MDCR_EL2_HPMN_MASK

	ldr	x1, [x0, #VCPU_KVM]
	ldr	x2, [x1, #KVM_VTTBR]

 * Call into the vgic backend for state saving
.macro save_vgic_state
	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, VGIC_SAVE_FN]

	mov	x25, #HCR_INT_OVERRIDE

 * Call into the vgic backend for state restoring
.macro restore_vgic_state
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE

	adr	x24, __vgic_sr_vectors
	ldr	x24, [x24, #VGIC_RESTORE_FN]

.macro save_timer_state
	ldr	x2, [x0, #VCPU_KVM]
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

	// Disable the virtual timer
	msr	cntv_ctl_el0, xzr

	// Allow physical timer/counter access for the host

	// Clear cntvoff for the host

.macro restore_timer_state
	// Disallow physical timer access for the guest
	// Physical counter access is allowed

	ldr	x2, [x0, #VCPU_KVM]
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]

	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
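/*
 * For illustration only, not part of the build: a sketch of how the
 * host side reaches this entry point through kvm_call_hyp (documented
 * further down in this file). The actual call site lives in the shared
 * KVM/ARM code; prototypes below are simplified.
 *
 *	struct kvm_vcpu;				// opaque here
 *
 *	extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *	extern unsigned long kvm_call_hyp(void *hypfn, ...);
 *
 *	static int run_guest_once_sketch(struct kvm_vcpu *vcpu)
 *	{
 *		// x0 = vcpu on entry; the value left in x1 on the exit
 *		// path (e.g. ARM_EXCEPTION_TRAP or ARM_EXCEPTION_IRQ)
 *		// comes back as the return value.
 *		return kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *	}
 */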
ENTRY(__kvm_vcpu_run)
	msr	tpidr_el2, x0	// Save the vcpu register

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	compute_debug_state 1f

	add	x2, x0, #VCPU_CONTEXT

	// We must restore the 32-bit state before the sysregs, thanks
	// to Cortex-A57 erratum #852523.
	restore_guest_32bit_state

	skip_debug_state x3, 1f

	// That's it, no more messing around.

	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack
	add	x2, x0, #VCPU_CONTEXT

	skip_debug_state x3, 1f
	save_guest_32bit_state

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	ldr	x2, [x0, #KVM_VTTBR]

 * We could do so much better if we had the VA as well.
 * Instead, we invalidate Stage-2 for this IPA, and the
 * whole of Stage-1. Weep...

 * We have to ensure completion of the invalidation at Stage-2,
 * since a table walk on another CPU could refill a TLB with a
 * complete (S1 + S2) walk based on the old Stage-2 mapping if
 * the Stage-1 invalidation happened first.
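/*
 * For illustration only, not part of the build: the ordering described
 * above, written as a self-contained helper using GCC inline assembly.
 * This is a sketch of the idea, not a copy of the instructions elided
 * from this function, and it assumes VTTBR_EL2 has already been pointed
 * at the guest's VMID.
 *
 *	static inline void stage2_flush_ipa_sketch(unsigned long ipa)
 *	{
 *		// Invalidate the Stage-2 entry for this IPA (Xt = IPA[47:12])
 *		asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa >> 12) : "memory");
 *		// The S2 invalidation must complete before the S1 one below,
 *		// or a walk on another CPU could re-create a combined S1+S2
 *		// entry based on the old Stage-2 mapping.
 *		asm volatile("dsb ish" : : : "memory");
 *		// Now drop the whole of Stage-1 for this VMID...
 *		asm volatile("tlbi vmalle1is" : : : "memory");
 *		// ...and wait for that to complete as well.
 *		asm volatile("dsb ish" : : : "memory");
 *		asm volatile("isb" : : : "memory");
 *	}
 */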
ENDPROC(__kvm_tlb_flush_vmid_ipa)

 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 * @struct kvm *kvm - pointer to kvm structure
 *
 * Invalidates all Stage 1 and 2 TLB entries for the current VMID.
ENTRY(__kvm_tlb_flush_vmid)
	ldr	x2, [x0, #KVM_VTTBR]
ENDPROC(__kvm_tlb_flush_vmid)

ENTRY(__kvm_flush_vm_context)
ENDPROC(__kvm_flush_vm_context)

// struct vgic_sr_vectors __vgic_sr_vectors;
ENTRY(__vgic_sr_vectors)
	.skip	VGIC_SR_VECTOR_SZ
ENDPROC(__vgic_sr_vectors)
	// Guess the context by looking at VTTBR:
	// If zero, then we're already in host context.
	// Otherwise restore a minimal host context before panicking.
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	bl	__restore_sysregs

1:	adr	x0, __hyp_panic_str
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
2:	.quad	HYP_PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C way, and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). The return value is
 * passed back in x0.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
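/*
 * For illustration only, not part of the build: one call that follows
 * the convention above. Prototypes are simplified sketches of the real
 * declarations in the arch headers.
 *
 *	struct kvm;					// opaque here
 *
 *	extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
 *					     unsigned long long ipa);
 *	extern unsigned long kvm_call_hyp(void *hypfn, ...);
 *
 *	static void flush_guest_ipa_sketch(struct kvm *kvm, unsigned long long ipa)
 *	{
 *		// hypfn selects the Hyp-mode target; once the HVC has
 *		// switched mode, kvm arrives in x0 and ipa in x1.
 *		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	}
 */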
ENDPROC(kvm_call_hyp)

.macro invalid_vector label, target

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:					// Guest trapped into EL2
	lsr	x2, x1, #ESR_ELx_EC_SHIFT
	cmp	x2, #ESR_ELx_EC_HVC64
	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */

	/* Check for __hyp_get_vectors */

 * Compute the function address in EL2, and shuffle the parameters.

	cmp	x2, #ESR_ELx_EC_DABT_LOW
	mov	x0, #ESR_ELx_EC_IABT_LOW
	b.ne	1f			// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_ELx_FSC_TYPE
	b.ne	1f			// Not a permission fault

 * Check for Stage-1 page table walk, which is guaranteed
 * to give a valid HPFAR_EL2.

	tbnz	x1, #7, 1f		// S1PTW is set

	/* Preserve PAR_EL1 */

 * Permission fault, HPFAR_EL2 is invalid.
 * Resolve the IPA the hard way using the guest VA.
 * Stage-1 translation already validated the memory access rights.
 * As such, we can use the EL1 translation regime, and don't have
 * to distinguish between EL0 and EL1 access.

	pop	x0, xzr			// Restore PAR_EL1 from the stack

	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
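/*
 * For illustration only, not part of the build: the PAR_EL1 to
 * HPFAR-style conversion performed by the ubfx/lsl pair above, as
 * plain C arithmetic.
 *
 *	#include <stdint.h>
 *
 *	static inline uint64_t par_to_hpfar_sketch(uint64_t par)
 *	{
 *		// PAR_EL1 carries PA[47:12] in bits [47:12]; HPFAR_EL2
 *		// wants the same field down at bits [39:4].
 *		return ((par >> 12) & ((UINT64_C(1) << 36) - 1)) << 4;
 *	}
 */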
1:	mrs	x3, hpfar_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
 * Translation failed. Just return to the guest and
 * let it fault again. Another CPU is probably playing
 * behind our back.
	mov	x1, #ARM_EXCEPTION_IRQ

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)