/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
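/*
 * A quick sketch of how these offsets resolve (assuming the usual
 * asm-offsets layout pulled in via <asm/asm-offsets.h>): they are byte
 * offsets into the per-vcpu kvm_cpu_context. For example
 *
 *	CPU_XREG_OFFSET(19)
 *
 * expands to CPU_GP_REGS + CPU_USER_PT_REGS + 8*19, i.e. the offset of
 * general purpose register x19 inside the saved register file, which is
 * why the save/restore macros below address consecutive GPRs in steps of
 * 8 or 16 bytes.
 */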
	.pushsection	.hyp.text, "ax"
.macro save_common_regs
	// x2: base address for cpu context

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// pc before entering el2
	mrs	x21, spsr_el2		// pstate before entering el2

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm
.macro restore_common_regs
	// x2: base address for cpu context

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// pc on return from el2
	msr	spsr_el2, x21		// pstate on return from el2

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm
.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm
.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm
.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm
/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
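/*
 * To make the coupling concrete, a sketch (an assumption about the C side,
 * not a definitive listing): the sysreg names used below are plain array
 * indices into the context's sys_regs[] array, and the CPU_SYSREGS constant
 * comes from asm-offsets along the lines of
 *
 *	DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
 *
 * so CPU_SYSREG_OFFSET(MPIDR_EL1) is just &ctxt->sys_regs[MPIDR_EL1] as a
 * byte offset. Reordering the indices without updating the bulk stp/ldp
 * sequences below would silently corrupt the saved state.
 */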
.macro save_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x18, contextidr_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]
.endm
.macro save_debug
	// x2: base address for cpu context

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip
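	/*
	 * The blocks below use a computed branch to skip the breakpoint and
	 * watchpoint registers this CPU does not implement. A sketch of the
	 * idiom (label name is illustrative; the shift matches the 4-byte
	 * instruction size):
	 *
	 *	adr	x26, 1f			// start of the 16-entry list
	 *	add	x26, x26, x24, lsl #2	// 4 bytes per skipped register
	 *	br	x26			// land on the first implemented one
	 * 1:	mrs	x20, dbgbcr15_el1
	 *	...
	 */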
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
	add	x26, x26, x24, lsl #2

	mrs	x20, dbgbcr15_el1
	mrs	x19, dbgbcr14_el1
	mrs	x18, dbgbcr13_el1
	mrs	x17, dbgbcr12_el1
	mrs	x16, dbgbcr11_el1
	mrs	x15, dbgbcr10_el1

	add	x26, x26, x24, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
	add	x26, x26, x24, lsl #2

	mrs	x20, dbgbvr15_el1
	mrs	x19, dbgbvr14_el1
	mrs	x18, dbgbvr13_el1
	mrs	x17, dbgbvr12_el1
	mrs	x16, dbgbvr11_el1
	mrs	x15, dbgbvr10_el1

	add	x26, x26, x24, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
	add	x26, x26, x25, lsl #2

	mrs	x20, dbgwcr15_el1
	mrs	x19, dbgwcr14_el1
	mrs	x18, dbgwcr13_el1
	mrs	x17, dbgwcr12_el1
	mrs	x16, dbgwcr11_el1
	mrs	x15, dbgwcr10_el1

	add	x26, x26, x25, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
	add	x26, x26, x25, lsl #2

	mrs	x20, dbgwvr15_el1
	mrs	x19, dbgwvr14_el1
	mrs	x18, dbgwvr13_el1
	mrs	x17, dbgwvr12_el1
	mrs	x16, dbgwvr11_el1
	mrs	x15, dbgwvr10_el1

	add	x26, x26, x25, lsl #2

	str	x20, [x3, #(15 * 8)]
	str	x19, [x3, #(14 * 8)]
	str	x18, [x3, #(13 * 8)]
	str	x17, [x3, #(12 * 8)]
	str	x16, [x3, #(11 * 8)]
	str	x15, [x3, #(10 * 8)]
	str	x14, [x3, #(9 * 8)]
	str	x13, [x3, #(8 * 8)]
	str	x12, [x3, #(7 * 8)]
	str	x11, [x3, #(6 * 8)]
	str	x10, [x3, #(5 * 8)]
	str	x9, [x3, #(4 * 8)]
	str	x8, [x3, #(3 * 8)]
	str	x7, [x3, #(2 * 8)]
	str	x6, [x3, #(1 * 8)]
	str	x5, [x3, #(0 * 8)]
	mrs	x21, mdccint_el1
	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
.endm
.macro restore_sysregs
	// x2: base address for cpu context

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]
	msr	contextidr_el1, x18
.endm
.macro restore_debug
	// x2: base address for cpu context

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip
	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
	add	x26, x26, x24, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x24, lsl #2

	msr	dbgbcr15_el1, x20
	msr	dbgbcr14_el1, x19
	msr	dbgbcr13_el1, x18
	msr	dbgbcr12_el1, x17
	msr	dbgbcr11_el1, x16
	msr	dbgbcr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
	add	x26, x26, x24, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x24, lsl #2

	msr	dbgbvr15_el1, x20
	msr	dbgbvr14_el1, x19
	msr	dbgbvr13_el1, x18
	msr	dbgbvr12_el1, x17
	msr	dbgbvr11_el1, x16
	msr	dbgbvr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
	add	x26, x26, x25, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x25, lsl #2

	msr	dbgwcr15_el1, x20
	msr	dbgwcr14_el1, x19
	msr	dbgwcr13_el1, x18
	msr	dbgwcr12_el1, x17
	msr	dbgwcr11_el1, x16
	msr	dbgwcr10_el1, x15

	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
	add	x26, x26, x25, lsl #2

	ldr	x20, [x3, #(15 * 8)]
	ldr	x19, [x3, #(14 * 8)]
	ldr	x18, [x3, #(13 * 8)]
	ldr	x17, [x3, #(12 * 8)]
	ldr	x16, [x3, #(11 * 8)]
	ldr	x15, [x3, #(10 * 8)]
	ldr	x14, [x3, #(9 * 8)]
	ldr	x13, [x3, #(8 * 8)]
	ldr	x12, [x3, #(7 * 8)]
	ldr	x11, [x3, #(6 * 8)]
	ldr	x10, [x3, #(5 * 8)]
	ldr	x9, [x3, #(4 * 8)]
	ldr	x8, [x3, #(3 * 8)]
	ldr	x7, [x3, #(2 * 8)]
	ldr	x6, [x3, #(1 * 8)]
	ldr	x5, [x3, #(0 * 8)]

	add	x26, x26, x25, lsl #2

	msr	dbgwvr15_el1, x20
	msr	dbgwvr14_el1, x19
	msr	dbgwvr13_el1, x18
	msr	dbgwvr12_el1, x17
	msr	dbgwvr11_el1, x16
	msr	dbgwvr10_el1, x15
	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	msr	mdccint_el1, x21
.endm
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm
.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm
.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm
.macro compute_debug_state target
	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	add	x25, x0, #VCPU_CONTEXT

	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE
	adds	xzr, x25, x26
	b.eq	9998f			// Nothing to see there

	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f			// Don't skip restore

9998:
	// Otherwise load the flags from memory in case we recently
	// trapped
	skip_debug_state x25, \target
9999:
.endm
.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)

	skip_debug_state x8, 2f

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)

	skip_debug_state x8, 2f

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]
	msr	hcr_el2, x2
	mov	x2, #CPTR_EL2_TTA
	msr	cptr_el2, x2

	mov	x2, #(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	orr	x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)

	// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
	// if not dirty.
	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]
	tbnz	x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
	orr	x2, x2, #MDCR_EL2_TDA
1:
	msr	mdcr_el2, x2
.endm
.macro deactivate_traps
	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm
/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
	mov	x25, #HCR_INT_OVERRIDE
/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	mrs	x24, hcr_el2
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
	orr	x24, x24, x25
	msr	hcr_el2, x24
	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
.endm
.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	mrs	x3, cntv_ctl_el0
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm
.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	msr	cntv_ctl_el0, x2
.endm
/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 *
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
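/*
 * For orientation, the host reaches this function through the HVC-based
 * kvm_call_hyp() trampoline documented further down; a sketch of the call
 * (names as used by the arch code, the exact call site may differ):
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * so we enter below with x0 = vcpu and eventually hand the exception code
 * back to the caller in x0.
 */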
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	compute_debug_state 1f

	add	x2, x0, #VCPU_CONTEXT

	skip_debug_state x3, 1f
	restore_guest_32bit_state

	// That's it, no more messing around.
	eret
__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	skip_debug_state x3, 1f
	save_guest_32bit_state

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	lsr	x1, x1, #12
	tlbi	ipas2e1is, x1

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb	ish
	tlbi	vmalle1is

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)
/*
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 * @struct kvm *kvm - pointer to kvm structure
 *
 * Invalidates all Stage 1 and 2 TLB entries for the current VMID.
 */
ENTRY(__kvm_tlb_flush_vmid)
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	tlbi	vmalls12e1is
	ret
ENDPROC(__kvm_tlb_flush_vmid)
ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	ish
	isb
	ret
ENDPROC(__kvm_flush_vm_context)
ENTRY(__kvm_hyp_panic)
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

1:	adr	x0, __hyp_panic_str

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in x0 and x1.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
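/*
 * Illustrative host-side usage (a sketch; the real call sites live in the
 * shared arch/arm KVM code referenced above):
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * Both end up in the el1_sync handler below with x0 holding the hyp
 * function pointer and x1-x3 holding its arguments.
 */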
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)
.macro invalid_vector	label, target
	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic
el1_sync:					// Guest trapped into EL2

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_ELx_EC_SHIFT

	cmp	x2, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */

	/* Check for __hyp_get_vectors */

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
el1_trap:
	cmp	x2, #ESR_ELx_EC_DABT_LOW
	mov	x0, #ESR_ELx_EC_IABT_LOW
	ccmp	x2, x0, #4, ne
	b.ne	1f			// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_ELx_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f			// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set
	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
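	/*
	 * The actual lookup is done with an AT (address translate)
	 * instruction; a sketch of the sequence used here (the scratch
	 * register choice is an assumption, PAR_EL1 is architectural):
	 *
	 *	mrs	x3, far_el2		// faulting guest VA
	 *	at	s1e1r, x3		// EL1 stage-1 read translation
	 *	isb
	 *	mrs	x3, par_el1		// PAR_EL1.F says whether it failed
	 */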
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f
1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return
3:	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
	pop	x2, x3
	pop	x0, x1
	eret
el1_irq:
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return
	.align	11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

	.popsection