2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
11 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
13 * Derived from book3s_rmhandlers.S and other files, which are:
15 * Copyright SUSE Linux Products GmbH 2009
17 * Authors: Alexander Graf <agraf@suse.de>
20 #include <asm/ppc_asm.h>
21 #include <asm/kvm_asm.h>
25 #include <asm/ptrace.h>
26 #include <asm/hvcall.h>
27 #include <asm/asm-offsets.h>
28 #include <asm/exception-64s.h>
29 #include <asm/kvm_book3s_asm.h>
30 #include <asm/book3s/64/mmu-hash.h>
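/* Byte offset of the checkpointed (TM) image of GPR 'reg' within the vcpu struct */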
34 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
36 /* Values in HSTATE_NAPPING(r13) */
37 #define NAPPING_CEDE 1
38 #define NAPPING_NOVCPU 2
41 * Call kvmppc_hv_entry in real mode.
42 * Must be called with interrupts hard-disabled.
46 * LR = return address to continue at after eventually re-enabling MMU
48 _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
50 std r0, PPC_LR_STKOFF(r1)
53 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
58 mtmsrd r0,1 /* clear RI in MSR */
64 ld r4, HSTATE_KVM_VCPU(r13)
67 /* Back from guest - restore host state and return to caller */
70 /* Restore host DABR and DABRX */
71 ld r5,HSTATE_DABR(r13)
75 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
78 ld r3,PACA_SPRG_VDSO(r13)
79 mtspr SPRN_SPRG_VDSO_WRITE,r3
81 /* Reload the host's PMU registers */
82 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
83 lbz r4, LPPACA_PMCINUSE(r3)
85 beq 23f /* skip if not */
87 ld r3, HSTATE_MMCR0(r13)
88 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
91 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
92 lwz r3, HSTATE_PMC1(r13)
93 lwz r4, HSTATE_PMC2(r13)
94 lwz r5, HSTATE_PMC3(r13)
95 lwz r6, HSTATE_PMC4(r13)
96 lwz r8, HSTATE_PMC5(r13)
97 lwz r9, HSTATE_PMC6(r13)
104 ld r3, HSTATE_MMCR0(r13)
105 ld r4, HSTATE_MMCR1(r13)
106 ld r5, HSTATE_MMCRA(r13)
107 ld r6, HSTATE_SIAR(r13)
108 ld r7, HSTATE_SDAR(r13)
114 ld r8, HSTATE_MMCR2(r13)
115 ld r9, HSTATE_SIER(r13)
118 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
124 * Reload DEC. HDEC interrupts were disabled when
125 * we reloaded the host's LPCR value.
127 ld r3, HSTATE_DECEXP(r13)
132 /* hwthread_req may have been set by cede or no-vcpu napping, so clear it */
134 stb r0, HSTATE_HWTHREAD_REQ(r13)
137 * For external and machine check interrupts, we need
138 * to call the Linux handler to process the interrupt.
139 * We do that by jumping to absolute address 0x500 for
140 * external interrupts, or the machine_check_fwnmi label
141 * for machine checks (since firmware might have patched
142 * the vector area at 0x200). The [h]rfid at the end of the
143 * handler will return to the book3s_hv_interrupts.S code.
144 * For other interrupts we do the rfid to get back
145 * to the book3s_hv_interrupts.S code here.
147 ld r8, 112+PPC_LR_STKOFF(r1)
149 ld r7, HSTATE_HOST_MSR(r13)
151 cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
152 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
154 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
155 beq 15f /* Invoke the H_DOORBELL handler */
156 cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
157 beq cr2, 14f /* HMI check */
159 /* RFI into the highmem handler, or branch to interrupt handler */
163 mtmsrd r6, 1 /* Clear RI in MSR */
166 beq cr1, 13f /* machine check */
169 /* On POWER7, we have external interrupts set to use HSRR0/1 */
170 11: mtspr SPRN_HSRR0, r8
174 13: b machine_check_fwnmi
176 14: mtspr SPRN_HSRR0, r8
178 b hmi_exception_after_realmode
180 15: mtspr SPRN_HSRR0, r8
184 kvmppc_primary_no_guest:
185 /* We handle this much like a ceded vcpu */
186 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
190 * Make sure the primary has finished the MMU switch.
191 * We should never get here on a secondary thread, but
192 * check it for robustness' sake.
194 ld r5, HSTATE_KVM_VCORE(r13)
195 65: lbz r0, VCORE_IN_GUEST(r5)
202 /* set our bit in napping_threads */
203 ld r5, HSTATE_KVM_VCORE(r13)
204 lbz r7, HSTATE_PTID(r13)
207 addi r6, r5, VCORE_NAPPING_THREADS
212 /* order napping_threads update vs testing entry_exit_map */
215 lwz r7, VCORE_ENTRY_EXIT(r5)
217 bge kvm_novcpu_exit /* another thread already exiting */
218 li r3, NAPPING_NOVCPU
219 stb r3, HSTATE_NAPPING(r13)
221 li r3, 0 /* Don't wake on privileged (OS) doorbell */
226 * Entered from kvm_start_guest if kvm_hstate.napping is set
232 ld r1, HSTATE_HOST_R1(r13)
233 ld r5, HSTATE_KVM_VCORE(r13)
235 stb r0, HSTATE_NAPPING(r13)
237 /* check the wake reason */
238 bl kvmppc_check_wake_reason
241 * Restore volatile registers since we could have called
242 * a C routine in kvmppc_check_wake_reason.
245 ld r5, HSTATE_KVM_VCORE(r13)
247 /* see if any other thread is already exiting */
248 lwz r0, VCORE_ENTRY_EXIT(r5)
252 /* clear our bit in napping_threads */
253 lbz r7, HSTATE_PTID(r13)
256 addi r6, r5, VCORE_NAPPING_THREADS
262 /* See if the wake reason means we need to exit */
266 /* See if our timeslice has expired (HDEC is negative) */
268 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
272 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
273 ld r4, HSTATE_KVM_VCPU(r13)
275 beq kvmppc_primary_no_guest
277 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
278 addi r3, r4, VCPU_TB_RMENTRY
279 bl kvmhv_start_timing
284 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
285 ld r4, HSTATE_KVM_VCPU(r13)
288 addi r3, r4, VCPU_TB_RMEXIT
289 bl kvmhv_accumulate_time
293 bl kvmhv_commence_exit
296 b kvmhv_switch_to_host
299 * We come in here when wakened from nap mode.
300 * Relocation is off and most register values are lost.
301 * r13 points to the PACA.
303 .globl kvm_start_guest
306 /* Set runlatch bit the minute you wake up from nap */
313 li r0,KVM_HWTHREAD_IN_KVM
314 stb r0,HSTATE_HWTHREAD_STATE(r13)
316 /* NV GPR values from power7_idle() will no longer be valid */
318 stb r0,PACA_NAPSTATELOST(r13)
320 /* were we napping due to cede? */
321 lbz r0,HSTATE_NAPPING(r13)
322 cmpwi r0,NAPPING_CEDE
324 cmpwi r0,NAPPING_NOVCPU
325 beq kvm_novcpu_wakeup
327 ld r1,PACAEMERGSP(r13)
328 subi r1,r1,STACK_FRAME_OVERHEAD
331 * We weren't napping due to cede, so this must be a secondary
332 * thread being woken up to run a guest, or being woken up due
333 * to a stray IPI. (Or due to some machine check or hypervisor
334 * maintenance interrupt while the core is in KVM.)
337 /* Check the wake reason in SRR1 to see why we got here */
338 bl kvmppc_check_wake_reason
340 * kvmppc_check_wake_reason could invoke a C routine, but we
341 * have no volatile registers to restore when we return.
347 /* get vcore pointer, NULL if we have nothing to run */
348 ld r5,HSTATE_KVM_VCORE(r13)
350 /* if we have no vcore to run, go back to sleep */
353 kvm_secondary_got_guest:
355 /* Set HSTATE_DSCR(r13) to something sensible */
356 ld r6, PACA_DSCR_DEFAULT(r13)
357 std r6, HSTATE_DSCR(r13)
359 /* On thread 0 of a subcore, set HDEC to max */
360 lbz r4, HSTATE_PTID(r13)
366 /* and set per-LPAR registers, if doing dynamic micro-threading */
367 ld r6, HSTATE_SPLIT_MODE(r13)
370 ld r0, KVM_SPLIT_RPR(r6)
372 ld r0, KVM_SPLIT_PMMAR(r6)
374 ld r0, KVM_SPLIT_LDBAR(r6)
378 /* Order load of vcpu after load of vcore */
380 ld r4, HSTATE_KVM_VCPU(r13)
383 /* Back from the guest, go back to nap */
384 /* Clear our vcpu and vcore pointers so we don't come back in early */
386 std r0, HSTATE_KVM_VCPU(r13)
388 * Once we clear HSTATE_KVM_VCORE(r13), the code in
389 * kvmppc_run_core() is going to assume that all our vcpu
390 * state is visible in memory. This lwsync makes sure that it is.
394 std r0, HSTATE_KVM_VCORE(r13)
397 * All secondaries exiting the guest fall through this path.
398 * Before proceeding, check for an HMI interrupt and
399 * invoke the OPAL HMI handler. By now we are sure that the
400 * primary thread on this core/subcore has already done the partition
401 * switch and TB resync, so it is safe to call the OPAL HMI handler.
403 cmpwi r12, BOOK3S_INTERRUPT_HMI
406 li r3,0 /* NULL argument */
407 bl hmi_exception_realmode
409 * At this point we have finished executing in the guest.
410 * We need to wait for hwthread_req to become zero, since
411 * we may not turn on the MMU while hwthread_req is non-zero.
412 * While waiting we also need to check whether we have been given a vcpu to run.
415 lbz r3, HSTATE_HWTHREAD_REQ(r13)
419 li r0, KVM_HWTHREAD_IN_KERNEL
420 stb r0, HSTATE_HWTHREAD_STATE(r13)
421 /* need to recheck hwthread_req after a barrier, to avoid race */
423 lbz r3, HSTATE_HWTHREAD_REQ(r13)
427 * We jump to pnv_wakeup_loss, which will return to the caller
428 * of power7_nap in the powernv cpu offline loop. The value we
429 * put in r3 becomes the return value for power7_nap.
433 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
439 ld r5, HSTATE_KVM_VCORE(r13)
442 ld r3, HSTATE_SPLIT_MODE(r13)
445 lbz r0, KVM_SPLIT_DO_NAP(r3)
451 b kvm_secondary_got_guest
453 54: li r0, KVM_HWTHREAD_IN_KVM
454 stb r0, HSTATE_HWTHREAD_STATE(r13)
458 * Here the primary thread is trying to return the core to
459 * whole-core mode, so we need to nap.
463 * When secondaries are napping in kvm_unsplit_nap() with
464 * hwthread_req = 1, an HMI goes ignored even though the subcores
465 * have already exited the guest. The HMI then keeps waking the
466 * secondaries from nap in a loop, and they always go back to nap
467 * since no vcore is assigned to them. This makes it impossible for
468 * the primary thread to get hold of the secondary threads, resulting
469 * in a soft lockup in the KVM path.
471 * So check whether an HMI is pending and handle it before we go to nap.
473 cmpwi r12, BOOK3S_INTERRUPT_HMI
475 li r3, 0 /* NULL argument */
476 bl hmi_exception_realmode
479 * Ensure that secondary doesn't nap when it has
480 * its vcore pointer set.
482 sync /* matches smp_mb() before setting split_info.do_nap */
483 ld r0, HSTATE_KVM_VCORE(r13)
486 /* clear any pending message */
488 lis r6, (PPC_DBELL_SERVER << (63-36))@h
490 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
491 /* Set kvm_split_mode.napped[tid] = 1 */
492 ld r3, HSTATE_SPLIT_MODE(r13)
494 lhz r4, PACAPACAINDEX(r13)
495 clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
496 addi r4, r4, KVM_SPLIT_NAPPED
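/* napped[] is indexed by this thread's position within the core (the low 3 bits of the paca index extracted above) */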
498 /* Check the do_nap flag again after setting napped[] */
500 lbz r0, KVM_SPLIT_DO_NAP(r3)
503 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
505 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
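/* r3 was loaded as (LPCR_PECEDH | LPCR_PECE0) >> 4; the rlwimi rotates it back up by 4 and inserts it under the full PECE mask, so PECEDH and PECE0 end up set while PECEDP and PECE1 are cleared */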
512 /******************************************************************************
516 *****************************************************************************/
518 /* Stack frame offsets */
519 #define STACK_SLOT_TID (112-16)
520 #define STACK_SLOT_PSSCR (112-24)
522 .global kvmppc_hv_entry
527 * R4 = vcpu pointer (or NULL)
532 * all other volatile GPRS = free
535 std r0, PPC_LR_STKOFF(r1)
538 /* Save R1 in the PACA */
539 std r1, HSTATE_HOST_R1(r13)
541 li r6, KVM_GUEST_MODE_HOST_HV
542 stb r6, HSTATE_IN_GUEST(r13)
544 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
545 /* Store initial timestamp */
548 addi r3, r4, VCPU_TB_RMENTRY
549 bl kvmhv_start_timing
559 * POWER7/POWER8 host -> guest partition switch code.
560 * We don't have to lock against concurrent tlbies,
561 * but we do have to coordinate across hardware threads.
563 /* Set bit in entry map iff exit map is zero. */
564 ld r5, HSTATE_KVM_VCORE(r13)
566 lbz r6, HSTATE_PTID(r13)
568 addi r9, r5, VCORE_ENTRY_EXIT
570 cmpwi r3, 0x100 /* any threads starting to exit? */
571 bge secondary_too_late /* if so we're too late to the party */
576 /* Primary thread switches to guest partition. */
577 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
583 li r0,LPID_RSVD /* switch to reserved LPID */
586 mtspr SPRN_SDR1,r6 /* switch to partition page table */
587 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
591 /* See if we need to flush the TLB */
592 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
593 clrldi r7,r6,64-6 /* extract bit number (6 bits) */
594 srdi r6,r6,6 /* doubleword number */
595 sldi r6,r6,3 /* address offset */
597 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
603 23: ldarx r7,0,r6 /* if set, clear the bit */
607 /* Flush the TLB of any entries for this LPID */
608 lwz r6,KVM_TLB_SETS(r9)
609 li r0,0 /* RS for P9 version of tlbiel */
611 li r7,0x800 /* IS field = 0b10 */
618 /* Add timebase offset onto timebase */
619 22: ld r8,VCORE_TB_OFFSET(r5)
622 mftb r6 /* current host timebase */
624 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
625 mftb r7 /* check if lower 24 bits overflowed */
630 addis r8,r8,0x100 /* if so, increment upper 40 bits */
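/* mtspr TBU40 writes only the upper 40 bits of the timebase, so if the low 24 bits wrapped between the mftb and the mtspr, the addis of 0x100 (i.e. 1 << 24) bumps that 40-bit field by one to compensate */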
633 /* Load guest PCR value to select appropriate compat mode */
634 37: ld r7, VCORE_PCR(r5)
641 /* DPDES and VTB are shared between threads */
642 ld r8, VCORE_DPDES(r5)
646 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
648 /* Mark the subcore state as inside guest */
649 bl kvmppc_subcore_enter_guest
651 ld r5, HSTATE_KVM_VCORE(r13)
652 ld r4, HSTATE_KVM_VCPU(r13)
654 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
656 /* Do we have a guest vcpu to run? */
658 beq kvmppc_primary_no_guest
661 /* Load up guest SLB entries */
662 lwz r5,VCPU_SLB_MAX(r4)
667 1: ld r8,VCPU_SLB_E(r6)
670 addi r6,r6,VCPU_SLB_SIZE
673 /* Increment yield count if they have a VPA */
677 li r6, LPPACA_YIELDCOUNT
682 stb r6, VCPU_VPA_DIRTY(r4)
685 /* Save purr/spurr */
688 std r5,HSTATE_PURR(r13)
689 std r6,HSTATE_SPURR(r13)
695 /* Save host values of some registers */
699 std r5, STACK_SLOT_TID(r1)
700 std r6, STACK_SLOT_PSSCR(r1)
701 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
704 /* Set partition DABR */
705 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
706 lwz r5,VCPU_DABRX(r4)
711 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
713 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
716 END_FTR_SECTION_IFSET(CPU_FTR_TM)
719 /* Load guest PMU registers */
720 /* R4 is live here (vcpu pointer) */
722 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
723 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
727 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
730 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
731 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
732 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
733 lwz r6, VCPU_PMC + 8(r4)
734 lwz r7, VCPU_PMC + 12(r4)
735 lwz r8, VCPU_PMC + 16(r4)
736 lwz r9, VCPU_PMC + 20(r4)
744 ld r5, VCPU_MMCR + 8(r4)
745 ld r6, VCPU_MMCR + 16(r4)
753 ld r5, VCPU_MMCR + 24(r4)
757 BEGIN_FTR_SECTION_NESTED(96)
758 lwz r7, VCPU_PMC + 24(r4)
759 lwz r8, VCPU_PMC + 28(r4)
760 ld r9, VCPU_MMCR + 32(r4)
764 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
765 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
769 /* Load up FP, VMX and VSX registers */
772 ld r14, VCPU_GPR(R14)(r4)
773 ld r15, VCPU_GPR(R15)(r4)
774 ld r16, VCPU_GPR(R16)(r4)
775 ld r17, VCPU_GPR(R17)(r4)
776 ld r18, VCPU_GPR(R18)(r4)
777 ld r19, VCPU_GPR(R19)(r4)
778 ld r20, VCPU_GPR(R20)(r4)
779 ld r21, VCPU_GPR(R21)(r4)
780 ld r22, VCPU_GPR(R22)(r4)
781 ld r23, VCPU_GPR(R23)(r4)
782 ld r24, VCPU_GPR(R24)(r4)
783 ld r25, VCPU_GPR(R25)(r4)
784 ld r26, VCPU_GPR(R26)(r4)
785 ld r27, VCPU_GPR(R27)(r4)
786 ld r28, VCPU_GPR(R28)(r4)
787 ld r29, VCPU_GPR(R29)(r4)
788 ld r30, VCPU_GPR(R30)(r4)
789 ld r31, VCPU_GPR(R31)(r4)
791 /* Switch DSCR to guest value */
796 /* Skip next section on POWER7 */
798 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
799 /* Load up POWER8-specific registers */
801 lwz r6, VCPU_PSPB(r4)
807 ld r6, VCPU_DAWRX(r4)
808 ld r7, VCPU_CIABR(r4)
815 ld r8, VCPU_EBBHR(r4)
818 ld r5, VCPU_EBBRR(r4)
819 ld r6, VCPU_BESCR(r4)
820 lwz r7, VCPU_GUEST_PID(r4)
827 /* POWER8-only registers */
828 ld r5, VCPU_TCSCR(r4)
830 ld r7, VCPU_CSIGR(r4)
837 /* POWER9-only registers */
839 ld r6, VCPU_PSSCR(r4)
840 oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
843 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
847 * Set the decrementer to the guest decrementer.
849 ld r8,VCPU_DEC_EXPIRES(r4)
850 /* r8 is a host timebase value here, convert to guest TB */
851 ld r5,HSTATE_KVM_VCORE(r13)
852 ld r6,VCORE_TB_OFFSET(r5)
859 ld r5, VCPU_SPRG0(r4)
860 ld r6, VCPU_SPRG1(r4)
861 ld r7, VCPU_SPRG2(r4)
862 ld r8, VCPU_SPRG3(r4)
868 /* Load up DAR and DSISR */
870 lwz r6, VCPU_DSISR(r4)
874 /* Restore AMR and UAMOR, set AMOR to all 1s */
882 /* Restore state of CTRL run bit; assume 1 on entry */
890 /* Secondary threads wait for primary to have done partition switch */
891 ld r5, HSTATE_KVM_VCORE(r13)
892 lbz r6, HSTATE_PTID(r13)
895 lbz r0, VCORE_IN_GUEST(r5)
899 20: lwz r3, VCORE_ENTRY_EXIT(r5)
902 lbz r0, VCORE_IN_GUEST(r5)
912 /* Check if HDEC expires soon */
914 cmpwi r3, 512 /* 1 microsecond */
917 deliver_guest_interrupt:
924 kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
932 /* r11 = vcpu->arch.msr & ~MSR_HV */
933 rldicl r11, r11, 63 - MSR_HV_LG, 1
934 rotldi r11, r11, 1 + MSR_HV_LG
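/* the rldicl rotates the HV bit up to the top and masks it off; the rotldi rotates the MSR back into place, i.e. r11 &= ~MSR_HV */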
937 /* Check if we can deliver an external or decrementer interrupt now */
938 ld r0, VCPU_PENDING_EXC(r4)
939 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
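/* bit 0 of r0 now holds the pending EXTERNAL_LEVEL flag */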
941 andi. r8, r11, MSR_EE
943 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
944 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
948 li r0, BOOK3S_INTERRUPT_EXTERNAL
952 li r0, BOOK3S_INTERRUPT_DECREMENTER
955 12: mtspr SPRN_SRR0, r10
959 bl kvmppc_msr_interrupt
965 * R10: value for HSRR0
966 * R11: value for HSRR1
971 stb r0,VCPU_CEDED(r4) /* cancel cede */
975 /* Activate guest mode, so faults get handled by KVM */
976 li r9, KVM_GUEST_MODE_GUEST_HV
977 stb r9, HSTATE_IN_GUEST(r13)
979 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
980 /* Accumulate timing */
981 addi r3, r4, VCPU_TB_GUEST
982 bl kvmhv_accumulate_time
990 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
993 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1000 ld r1, VCPU_GPR(R1)(r4)
1001 ld r2, VCPU_GPR(R2)(r4)
1002 ld r3, VCPU_GPR(R3)(r4)
1003 ld r5, VCPU_GPR(R5)(r4)
1004 ld r6, VCPU_GPR(R6)(r4)
1005 ld r7, VCPU_GPR(R7)(r4)
1006 ld r8, VCPU_GPR(R8)(r4)
1007 ld r9, VCPU_GPR(R9)(r4)
1008 ld r10, VCPU_GPR(R10)(r4)
1009 ld r11, VCPU_GPR(R11)(r4)
1010 ld r12, VCPU_GPR(R12)(r4)
1011 ld r13, VCPU_GPR(R13)(r4)
1015 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1016 ld r0, VCPU_GPR(R0)(r4)
1017 ld r4, VCPU_GPR(R4)(r4)
1026 stw r12, VCPU_TRAP(r4)
1027 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1028 addi r3, r4, VCPU_TB_RMEXIT
1029 bl kvmhv_accumulate_time
1031 11: b kvmhv_switch_to_host
1038 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
1039 12: stw r12, VCPU_TRAP(r4)
1041 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1042 addi r3, r4, VCPU_TB_RMEXIT
1043 bl kvmhv_accumulate_time
1047 /******************************************************************************
1051 *****************************************************************************/
1054 * We come here from the first-level interrupt handlers.
1056 .globl kvmppc_interrupt_hv
1057 kvmppc_interrupt_hv:
1059 * Register contents:
1060 * R12 = interrupt vector
1062 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
1063 * guest R13 saved in SPRN_SCRATCH0
1065 std r9, HSTATE_SCRATCH2(r13)
1067 lbz r9, HSTATE_IN_GUEST(r13)
1068 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1069 beq kvmppc_bad_host_intr
1070 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1071 cmpwi r9, KVM_GUEST_MODE_GUEST
1072 ld r9, HSTATE_SCRATCH2(r13)
1073 beq kvmppc_interrupt_pr
1075 /* We're now back in the host but in guest MMU context */
1076 li r9, KVM_GUEST_MODE_HOST_HV
1077 stb r9, HSTATE_IN_GUEST(r13)
1079 ld r9, HSTATE_KVM_VCPU(r13)
1081 /* Save registers */
1083 std r0, VCPU_GPR(R0)(r9)
1084 std r1, VCPU_GPR(R1)(r9)
1085 std r2, VCPU_GPR(R2)(r9)
1086 std r3, VCPU_GPR(R3)(r9)
1087 std r4, VCPU_GPR(R4)(r9)
1088 std r5, VCPU_GPR(R5)(r9)
1089 std r6, VCPU_GPR(R6)(r9)
1090 std r7, VCPU_GPR(R7)(r9)
1091 std r8, VCPU_GPR(R8)(r9)
1092 ld r0, HSTATE_SCRATCH2(r13)
1093 std r0, VCPU_GPR(R9)(r9)
1094 std r10, VCPU_GPR(R10)(r9)
1095 std r11, VCPU_GPR(R11)(r9)
1096 ld r3, HSTATE_SCRATCH0(r13)
1097 lwz r4, HSTATE_SCRATCH1(r13)
1098 std r3, VCPU_GPR(R12)(r9)
1101 ld r3, HSTATE_CFAR(r13)
1102 std r3, VCPU_CFAR(r9)
1103 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1105 ld r4, HSTATE_PPR(r13)
1106 std r4, VCPU_PPR(r9)
1107 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1109 /* Restore R1/R2 so we can handle faults */
1110 ld r1, HSTATE_HOST_R1(r13)
1113 mfspr r10, SPRN_SRR0
1114 mfspr r11, SPRN_SRR1
1115 std r10, VCPU_SRR0(r9)
1116 std r11, VCPU_SRR1(r9)
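/* The first-level handlers set bit 1 of the trap number in r12 for interrupts delivered via HSRR0/1, so use it to decide where the guest PC/MSR were saved */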
1117 andi. r0, r12, 2 /* need to read HSRR0/1? */
1119 mfspr r10, SPRN_HSRR0
1120 mfspr r11, SPRN_HSRR1
1122 1: std r10, VCPU_PC(r9)
1123 std r11, VCPU_MSR(r9)
1127 std r3, VCPU_GPR(R13)(r9)
1130 stw r12,VCPU_TRAP(r9)
1132 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1133 addi r3, r9, VCPU_TB_RMINTR
1135 bl kvmhv_accumulate_time
1136 ld r5, VCPU_GPR(R5)(r9)
1137 ld r6, VCPU_GPR(R6)(r9)
1138 ld r7, VCPU_GPR(R7)(r9)
1139 ld r8, VCPU_GPR(R8)(r9)
1142 /* Save HEIR (HV emulation assist reg) in emul_inst
1143 if this is an HEI (HV emulation interrupt, e40) */
1144 li r3,KVM_INST_FETCH_FAILED
1145 stw r3,VCPU_LAST_INST(r9)
1146 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1149 11: stw r3,VCPU_HEIR(r9)
1151 /* these are volatile across C function calls */
1154 std r3, VCPU_CTR(r9)
1155 std r4, VCPU_XER(r9)
1157 /* If this is a page table miss then see if it's theirs or ours */
1158 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1160 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1163 /* See if this is a leftover HDEC interrupt */
1164 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1169 bge fast_guest_return
1171 /* See if this is an hcall we can handle in real mode */
1172 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1173 beq hcall_try_real_mode
1175 /* Hypervisor doorbell - exit only if host IPI flag set */
1176 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1178 lbz r0, HSTATE_HOST_IPI(r13)
1183 /* External interrupt ? */
1184 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1185 bne+ guest_exit_cont
1187 /* External interrupt, first check for host_ipi. If this is
1188 * set, we know the host wants us out so let's do it now
1193 * Restore the active volatile registers after returning from kvmppc_read_intr, a C function.
1196 ld r9, HSTATE_KVM_VCPU(r13)
1197 li r12, BOOK3S_INTERRUPT_EXTERNAL
1200 * kvmppc_read_intr return codes:
1202 * Exit to host (r3 > 0)
1203 * 1 An interrupt is pending that needs to be handled by the host
1204 * Exit guest and return to host by branching to guest_exit_cont
1206 * 2 Passthrough that needs completion in the host
1207 * Exit guest and return to host by branching to guest_exit_cont
1208 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1209 * to indicate to the host to complete handling the interrupt
1211 * Before returning to the guest, we check if any CPU is heading out
1212 * to the host and, if so, we head out also. If no CPUs are heading
1213 * out to the host, check the return values <= 0 below.
1215 * Return to guest (r3 <= 0)
1216 * 0 No external interrupt is pending
1217 * -1 A guest wakeup IPI (which has now been cleared)
1218 * In either case, we return to the guest to deliver any pending guest interrupts.
1221 * -2 A PCI passthrough external interrupt was handled
1222 * (interrupt was delivered directly to guest)
1223 * Return to guest to deliver any pending guest interrupts.
1229 /* Return code = 2 */
1230 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1231 stw r12, VCPU_TRAP(r9)
1234 1: /* Return code <= 1 */
1238 /* Return code <= 0 */
1239 4: ld r5, HSTATE_KVM_VCORE(r13)
1240 lwz r0, VCORE_ENTRY_EXIT(r5)
1243 blt deliver_guest_interrupt
1245 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1246 /* Save more register state */
1249 std r6, VCPU_DAR(r9)
1250 stw r7, VCPU_DSISR(r9)
1251 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1252 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1254 std r6, VCPU_FAULT_DAR(r9)
1255 stw r7, VCPU_FAULT_DSISR(r9)
1257 /* See if it is a machine check */
1258 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1259 beq machine_check_realmode
1261 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1262 addi r3, r9, VCPU_TB_RMEXIT
1264 bl kvmhv_accumulate_time
1268 /* Increment exit count, poke other threads to exit */
1269 bl kvmhv_commence_exit
1271 ld r9, HSTATE_KVM_VCPU(r13)
1272 lwz r12, VCPU_TRAP(r9)
1274 /* Stop others sending VCPU interrupts to this physical CPU */
1276 stw r0, VCPU_CPU(r9)
1277 stw r0, VCPU_THREAD_CPU(r9)
1279 /* Save guest CTRL register, set runlatch to 1 */
1281 stw r6,VCPU_CTRL(r9)
1287 /* Read the guest SLB and save it away */
1288 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1294 andis. r0,r8,SLB_ESID_V@h
1296 add r8,r8,r6 /* put index in */
1298 std r8,VCPU_SLB_E(r7)
1299 std r3,VCPU_SLB_V(r7)
1300 addi r7,r7,VCPU_SLB_SIZE
1304 stw r5,VCPU_SLB_MAX(r9)
1307 * Save the guest PURR/SPURR
1312 ld r8,VCPU_SPURR(r9)
1313 std r5,VCPU_PURR(r9)
1314 std r6,VCPU_SPURR(r9)
1319 * Restore host PURR/SPURR and add guest times
1320 * so that the time spent in the guest is accounted for.
1322 ld r3,HSTATE_PURR(r13)
1323 ld r4,HSTATE_SPURR(r13)
1334 /* r5 is a guest timebase value here, convert to host TB */
1335 ld r3,HSTATE_KVM_VCORE(r13)
1336 ld r4,VCORE_TB_OFFSET(r3)
1338 std r5,VCPU_DEC_EXPIRES(r9)
1342 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1343 /* Save POWER8-specific registers */
1347 std r5, VCPU_IAMR(r9)
1348 stw r6, VCPU_PSPB(r9)
1349 std r7, VCPU_FSCR(r9)
1353 std r7, VCPU_TAR(r9)
1354 mfspr r8, SPRN_EBBHR
1355 std r8, VCPU_EBBHR(r9)
1356 mfspr r5, SPRN_EBBRR
1357 mfspr r6, SPRN_BESCR
1360 std r5, VCPU_EBBRR(r9)
1361 std r6, VCPU_BESCR(r9)
1362 stw r7, VCPU_GUEST_PID(r9)
1363 std r8, VCPU_WORT(r9)
1365 mfspr r5, SPRN_TCSCR
1367 mfspr r7, SPRN_CSIGR
1369 std r5, VCPU_TCSCR(r9)
1370 std r6, VCPU_ACOP(r9)
1371 std r7, VCPU_CSIGR(r9)
1372 std r8, VCPU_TACR(r9)
1375 mfspr r6, SPRN_PSSCR
1376 std r5, VCPU_TID(r9)
1377 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1379 std r6, VCPU_PSSCR(r9)
1380 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
1382 * Restore various registers to 0, where non-zero values
1383 * set by the guest could disrupt the host.
1387 mtspr SPRN_CIABR, r0
1388 mtspr SPRN_DAWRX, r0
1391 mtspr SPRN_TCSCR, r0
1392 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1395 mtspr SPRN_MMCRS, r0
1396 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1399 /* Save and reset AMR and UAMOR before turning on the MMU */
1403 std r6,VCPU_UAMOR(r9)
1407 /* Switch DSCR back to host value */
1409 ld r7, HSTATE_DSCR(r13)
1410 std r8, VCPU_DSCR(r9)
1413 /* Save non-volatile GPRs */
1414 std r14, VCPU_GPR(R14)(r9)
1415 std r15, VCPU_GPR(R15)(r9)
1416 std r16, VCPU_GPR(R16)(r9)
1417 std r17, VCPU_GPR(R17)(r9)
1418 std r18, VCPU_GPR(R18)(r9)
1419 std r19, VCPU_GPR(R19)(r9)
1420 std r20, VCPU_GPR(R20)(r9)
1421 std r21, VCPU_GPR(R21)(r9)
1422 std r22, VCPU_GPR(R22)(r9)
1423 std r23, VCPU_GPR(R23)(r9)
1424 std r24, VCPU_GPR(R24)(r9)
1425 std r25, VCPU_GPR(R25)(r9)
1426 std r26, VCPU_GPR(R26)(r9)
1427 std r27, VCPU_GPR(R27)(r9)
1428 std r28, VCPU_GPR(R28)(r9)
1429 std r29, VCPU_GPR(R29)(r9)
1430 std r30, VCPU_GPR(R30)(r9)
1431 std r31, VCPU_GPR(R31)(r9)
1434 mfspr r3, SPRN_SPRG0
1435 mfspr r4, SPRN_SPRG1
1436 mfspr r5, SPRN_SPRG2
1437 mfspr r6, SPRN_SPRG3
1438 std r3, VCPU_SPRG0(r9)
1439 std r4, VCPU_SPRG1(r9)
1440 std r5, VCPU_SPRG2(r9)
1441 std r6, VCPU_SPRG3(r9)
1447 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1450 END_FTR_SECTION_IFSET(CPU_FTR_TM)
1453 /* Increment yield count if they have a VPA */
1454 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1457 li r4, LPPACA_YIELDCOUNT
1462 stb r3, VCPU_VPA_DIRTY(r9)
1464 /* Save PMU registers if requested */
1465 /* r8 and cr0.eq are live here */
1468 * POWER8 seems to have a hardware bug where setting
1469 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1470 * when some counters are already negative doesn't seem
1471 * to cause a performance monitor alert (and hence interrupt).
1472 * The effect of this is that when saving the PMU state,
1473 * if there is no PMU alert pending when we read MMCR0
1474 * before freezing the counters, but one becomes pending
1475 * before we read the counters, we lose it.
1476 * To work around this, we need a way to freeze the counters
1477 * before reading MMCR0. Normally, freezing the counters
1478 * is done by writing MMCR0 (to set MMCR0[FC]) which
1479 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
1480 * we can also freeze the counters using MMCR2, by writing
1481 * 1s to all the counter freeze condition bits (there are
1482 * 9 bits each for 6 counters).
1484 li r3, -1 /* set all freeze bits */
1486 mfspr r10, SPRN_MMCR2
1487 mtspr SPRN_MMCR2, r3
1489 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1491 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1492 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1493 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1494 mfspr r6, SPRN_MMCRA
1495 /* Clear MMCRA in order to disable SDAR updates */
1497 mtspr SPRN_MMCRA, r7
1499 beq 21f /* if no VPA, save PMU stuff anyway */
1500 lbz r7, LPPACA_PMCINUSE(r8)
1501 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1503 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1505 21: mfspr r5, SPRN_MMCR1
1508 std r4, VCPU_MMCR(r9)
1509 std r5, VCPU_MMCR + 8(r9)
1510 std r6, VCPU_MMCR + 16(r9)
1512 std r10, VCPU_MMCR + 24(r9)
1513 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1514 std r7, VCPU_SIAR(r9)
1515 std r8, VCPU_SDAR(r9)
1522 stw r3, VCPU_PMC(r9)
1523 stw r4, VCPU_PMC + 4(r9)
1524 stw r5, VCPU_PMC + 8(r9)
1525 stw r6, VCPU_PMC + 12(r9)
1526 stw r7, VCPU_PMC + 16(r9)
1527 stw r8, VCPU_PMC + 20(r9)
1530 std r5, VCPU_SIER(r9)
1531 BEGIN_FTR_SECTION_NESTED(96)
1532 mfspr r6, SPRN_SPMC1
1533 mfspr r7, SPRN_SPMC2
1534 mfspr r8, SPRN_MMCRS
1535 stw r6, VCPU_PMC + 24(r9)
1536 stw r7, VCPU_PMC + 28(r9)
1537 std r8, VCPU_MMCR + 32(r9)
1539 mtspr SPRN_MMCRS, r4
1540 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
1541 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1549 /* Restore host values of some registers */
1551 ld r5, STACK_SLOT_TID(r1)
1552 ld r6, STACK_SLOT_PSSCR(r1)
1554 mtspr SPRN_PSSCR, r6
1555 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1558 * POWER7/POWER8 guest -> host partition switch code.
1559 * We don't have to lock against tlbies but we do
1560 * have to coordinate the hardware threads.
1562 kvmhv_switch_to_host:
1563 /* Secondary threads wait for primary to do partition switch */
1564 ld r5,HSTATE_KVM_VCORE(r13)
1565 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1566 lbz r3,HSTATE_PTID(r13)
1570 13: lbz r3,VCORE_IN_GUEST(r5)
1576 /* Primary thread waits for all the secondaries to exit guest */
1577 15: lwz r3,VCORE_ENTRY_EXIT(r5)
1578 rlwinm r0,r3,32-8,0xff
1584 /* Did we actually switch to the guest at all? */
1585 lbz r6, VCORE_IN_GUEST(r5)
1589 /* Primary thread switches back to host partition */
1590 lwz r7,KVM_HOST_LPID(r4)
1592 ld r6,KVM_HOST_SDR1(r4)
1593 li r8,LPID_RSVD /* switch to reserved LPID */
1596 mtspr SPRN_SDR1,r6 /* switch to host page table */
1597 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
1602 /* DPDES and VTB are shared between threads */
1603 mfspr r7, SPRN_DPDES
1605 std r7, VCORE_DPDES(r5)
1606 std r8, VCORE_VTB(r5)
1607 /* clear DPDES so we don't get guest doorbells in the host */
1609 mtspr SPRN_DPDES, r8
1610 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1612 /* If HMI, call kvmppc_realmode_hmi_handler() */
1613 cmpwi r12, BOOK3S_INTERRUPT_HMI
1615 bl kvmppc_realmode_hmi_handler
1617 li r12, BOOK3S_INTERRUPT_HMI
1619 * At this point kvmppc_realmode_hmi_handler has already resynchronized
1620 * the TB, so there is no need to subtract the guest timebase
1621 * offset from the timebase; skip it.
1623 * Also, do not call kvmppc_subcore_exit_guest() because it has
1624 * been invoked as part of kvmppc_realmode_hmi_handler().
1629 /* Subtract timebase offset from timebase */
1630 ld r8,VCORE_TB_OFFSET(r5)
1633 mftb r6 /* current guest timebase */
1635 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1636 mftb r7 /* check if lower 24 bits overflowed */
1641 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1644 17: bl kvmppc_subcore_exit_guest
1646 30: ld r5,HSTATE_KVM_VCORE(r13)
1647 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1650 ld r0, VCORE_PCR(r5)
1656 /* Signal secondary CPUs to continue */
1657 stb r0,VCORE_IN_GUEST(r5)
1658 19: lis r8,0x7fff /* MAX_INT@h */
1661 16: ld r8,KVM_HOST_LPCR(r4)
1665 /* load host SLB entries */
1666 ld r8,PACA_SLBSHADOWPTR(r13)
1668 .rept SLB_NUM_BOLTED
1669 li r3, SLBSHADOW_SAVEAREA
1673 andis. r7,r5,SLB_ESID_V@h
1679 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1680 /* Finish timing, if we have a vcpu */
1681 ld r4, HSTATE_KVM_VCPU(r13)
1685 bl kvmhv_accumulate_time
1688 /* Unset guest mode */
1689 li r0, KVM_GUEST_MODE_NONE
1690 stb r0, HSTATE_IN_GUEST(r13)
1692 ld r0, 112+PPC_LR_STKOFF(r1)
1698 * Check whether an HDSI is an HPTE not found fault or something else.
1699 * If it is an HPTE not found fault that is due to the guest accessing
1700 * a page that they have mapped but which we have paged out, then
1701 * we continue on with the guest exit path. In all other cases,
1702 * reflect the HDSI to the guest as a DSI.
1706 mfspr r6, SPRN_HDSISR
1707 /* HPTE not found fault or protection fault? */
1708 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
1709 beq 1f /* if not, send it to the guest */
1710 andi. r0, r11, MSR_DR /* data relocation enabled? */
1713 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
1714 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
1715 bne 7f /* if no SLB entry found */
1716 4: std r4, VCPU_FAULT_DAR(r9)
1717 stw r6, VCPU_FAULT_DSISR(r9)
1719 /* Search the hash table. */
1720 mr r3, r9 /* vcpu pointer */
1721 li r7, 1 /* data fault */
1722 bl kvmppc_hpte_hv_fault
1723 ld r9, HSTATE_KVM_VCPU(r13)
1725 ld r11, VCPU_MSR(r9)
1726 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1727 cmpdi r3, 0 /* retry the instruction */
1729 cmpdi r3, -1 /* handle in kernel mode */
1731 cmpdi r3, -2 /* MMIO emulation; need instr word */
1734 /* Synthesize a DSI (or DSegI) for the guest */
1735 ld r4, VCPU_FAULT_DAR(r9)
1737 1: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
1738 mtspr SPRN_DSISR, r6
1739 7: mtspr SPRN_DAR, r4
1740 mtspr SPRN_SRR0, r10
1741 mtspr SPRN_SRR1, r11
1743 bl kvmppc_msr_interrupt
1744 fast_interrupt_c_return:
1745 6: ld r7, VCPU_CTR(r9)
1752 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1753 ld r5, KVM_VRMA_SLB_V(r5)
1756 /* If this is for emulated MMIO, load the instruction word */
1757 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
1759 /* Set guest mode to 'jump over instruction' so if lwz faults
1760 * we'll just continue at the next IP. */
1761 li r0, KVM_GUEST_MODE_SKIP
1762 stb r0, HSTATE_IN_GUEST(r13)
1764 /* Do the access with MSR:DR enabled */
1766 ori r4, r3, MSR_DR /* Enable paging for data */
1771 /* Store the result */
1772 stw r8, VCPU_LAST_INST(r9)
1774 /* Unset guest mode. */
1775 li r0, KVM_GUEST_MODE_HOST_HV
1776 stb r0, HSTATE_IN_GUEST(r13)
1780 * Similarly for an HISI, reflect it to the guest as an ISI unless
1781 * it is an HPTE not found fault for a page that we have paged out.
1784 andis. r0, r11, SRR1_ISI_NOPT@h
1786 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
1789 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
1790 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
1791 bne 7f /* if no SLB entry found */
1793 /* Search the hash table. */
1794 mr r3, r9 /* vcpu pointer */
1797 li r7, 0 /* instruction fault */
1798 bl kvmppc_hpte_hv_fault
1799 ld r9, HSTATE_KVM_VCPU(r13)
1801 ld r11, VCPU_MSR(r9)
1802 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1803 cmpdi r3, 0 /* retry the instruction */
1804 beq fast_interrupt_c_return
1805 cmpdi r3, -1 /* handle in kernel mode */
1808 /* Synthesize an ISI (or ISegI) for the guest */
1810 1: li r0, BOOK3S_INTERRUPT_INST_STORAGE
1811 7: mtspr SPRN_SRR0, r10
1812 mtspr SPRN_SRR1, r11
1814 bl kvmppc_msr_interrupt
1815 b fast_interrupt_c_return
1817 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
1818 ld r5, KVM_VRMA_SLB_V(r6)
1822 * Try to handle an hcall in real mode.
1823 * Returns to the guest if we handle it, or continues on up to
1824 * the kernel if we can't (i.e. if we don't have a handler for
1825 * it, or if the handler returns H_TOO_HARD).
1827 * r5 - r8 contain hcall args,
1828 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
1830 hcall_try_real_mode:
1831 ld r3,VCPU_GPR(R3)(r9)
1833 /* sc 1 from userspace - reflect to guest syscall */
1834 bne sc_1_fast_return
1836 cmpldi r3,hcall_real_table_end - hcall_real_table
1838 /* See if this hcall is enabled for in-kernel handling */
1840 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
1841 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
1843 ld r0, KVM_ENABLED_HCALLS(r4)
1844 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
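/* hcall numbers are multiples of 4 and enabled_hcalls[] has one bit per hcall, so r0 is the doubleword offset ((r3/4)/64 * 8) and r4 the bit number within that doubleword */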
1848 /* Get pointer to handler, if any, and call it */
1849 LOAD_REG_ADDR(r4, hcall_real_table)
1855 mr r3,r9 /* get vcpu pointer */
1856 ld r4,VCPU_GPR(R4)(r9)
1859 beq hcall_real_fallback
1860 ld r4,HSTATE_KVM_VCPU(r13)
1861 std r3,VCPU_GPR(R3)(r4)
1869 li r10, BOOK3S_INTERRUPT_SYSCALL
1870 bl kvmppc_msr_interrupt
1874 /* We've attempted a real mode hcall, but the handler has punted it back
1875 * to userspace. We need to restore some clobbered volatiles
1876 * before resuming the pass-it-to-qemu path */
1877 hcall_real_fallback:
1878 li r12,BOOK3S_INTERRUPT_SYSCALL
1879 ld r9, HSTATE_KVM_VCPU(r13)
1883 .globl hcall_real_table
1885 .long 0 /* 0 - unused */
1886 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
1887 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
1888 .long DOTSYM(kvmppc_h_read) - hcall_real_table
1889 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
1890 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
1891 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
1892 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
1893 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
1894 .long 0 /* 0x24 - H_SET_SPRG0 */
1895 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
1910 #ifdef CONFIG_KVM_XICS
1911 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
1912 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
1913 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
1914 .long 0 /* 0x70 - H_IPOLL */
1915 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
1917 .long 0 /* 0x64 - H_EOI */
1918 .long 0 /* 0x68 - H_CPPR */
1919 .long 0 /* 0x6c - H_IPI */
1920 .long 0 /* 0x70 - H_IPOLL */
1921 .long 0 /* 0x74 - H_XIRR */
1949 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
1950 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
1966 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
1970 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
1971 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
1972 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
2085 .long DOTSYM(kvmppc_h_random) - hcall_real_table
2086 .globl hcall_real_table_end
2087 hcall_real_table_end:
2089 _GLOBAL(kvmppc_h_set_xdabr)
2090 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2092 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2095 6: li r3, H_PARAMETER
2098 _GLOBAL(kvmppc_h_set_dabr)
2099 li r5, DABRX_USER | DABRX_KERNEL
2103 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2104 std r4,VCPU_DABR(r3)
2105 stw r5, VCPU_DABRX(r3)
2106 mtspr SPRN_DABRX, r5
2107 /* Work around P7 bug where DABR can get corrupted on mtspr */
2108 1: mtspr SPRN_DABR,r4
2116 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2117 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
2118 rlwimi r5, r4, 2, DAWRX_WT
2120 std r4, VCPU_DAWR(r3)
2121 std r5, VCPU_DAWRX(r3)
2123 mtspr SPRN_DAWRX, r5
2127 _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
2129 std r11,VCPU_MSR(r3)
2131 stb r0,VCPU_CEDED(r3)
2132 sync /* order setting ceded vs. testing prodded */
2133 lbz r5,VCPU_PRODDED(r3)
2135 bne kvm_cede_prodded
2136 li r12,0 /* set trap to 0 to say hcall is handled */
2137 stw r12,VCPU_TRAP(r3)
2139 std r0,VCPU_GPR(R3)(r3)
2142 * Set our bit in the bitmask of napping threads unless all the
2143 * other threads are already napping, in which case we send this vcpu off to the host.
2146 ld r5,HSTATE_KVM_VCORE(r13)
2147 lbz r6,HSTATE_PTID(r13)
2148 lwz r8,VCORE_ENTRY_EXIT(r5)
2152 addi r6,r5,VCORE_NAPPING_THREADS
2159 /* order napping_threads update vs testing entry_exit_map */
2162 stb r0,HSTATE_NAPPING(r13)
2163 lwz r7,VCORE_ENTRY_EXIT(r5)
2165 bge 33f /* another thread already exiting */
2168 * Although not specifically required by the architecture, POWER7
2169 * preserves the following registers in nap mode, even if an SMT mode
2170 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2171 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2173 /* Save non-volatile GPRs */
2174 std r14, VCPU_GPR(R14)(r3)
2175 std r15, VCPU_GPR(R15)(r3)
2176 std r16, VCPU_GPR(R16)(r3)
2177 std r17, VCPU_GPR(R17)(r3)
2178 std r18, VCPU_GPR(R18)(r3)
2179 std r19, VCPU_GPR(R19)(r3)
2180 std r20, VCPU_GPR(R20)(r3)
2181 std r21, VCPU_GPR(R21)(r3)
2182 std r22, VCPU_GPR(R22)(r3)
2183 std r23, VCPU_GPR(R23)(r3)
2184 std r24, VCPU_GPR(R24)(r3)
2185 std r25, VCPU_GPR(R25)(r3)
2186 std r26, VCPU_GPR(R26)(r3)
2187 std r27, VCPU_GPR(R27)(r3)
2188 std r28, VCPU_GPR(R28)(r3)
2189 std r29, VCPU_GPR(R29)(r3)
2190 std r30, VCPU_GPR(R30)(r3)
2191 std r31, VCPU_GPR(R31)(r3)
2196 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2198 ld r9, HSTATE_KVM_VCPU(r13)
2200 END_FTR_SECTION_IFSET(CPU_FTR_TM)
2204 * Set DEC to the smaller of DEC and HDEC, so that we wake
2205 * no later than the end of our timeslice (HDEC interrupts
2206 * don't wake us from nap).
2215 /* save expiry time of guest decrementer */
2218 ld r4, HSTATE_KVM_VCPU(r13)
2219 ld r5, HSTATE_KVM_VCORE(r13)
2220 ld r6, VCORE_TB_OFFSET(r5)
2221 subf r3, r6, r3 /* convert to host TB value */
2222 std r3, VCPU_DEC_EXPIRES(r4)
2224 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2225 ld r4, HSTATE_KVM_VCPU(r13)
2226 addi r3, r4, VCPU_TB_CEDE
2227 bl kvmhv_accumulate_time
2230 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2233 * Take a nap until a decrementer, external or doorbell interrupt
2234 * occurs, with PECE1 and PECE0 set in LPCR.
2235 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
2236 * Also clear the runlatch bit before napping.
2239 mfspr r0, SPRN_CTRLF
2241 mtspr SPRN_CTRLT, r0
2244 stb r0,HSTATE_HWTHREAD_REQ(r13)
2246 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
2248 ori r5, r5, LPCR_PECEDH
2249 rlwimi r5, r3, 0, LPCR_PECEDP
2250 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2252 kvm_nap_sequence: /* desired LPCR value in r5 */
2255 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2256 * enable state loss = 1 (allow SMT mode switch)
2257 * requested level = 0 (just stop dispatching)
2259 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2260 mtspr SPRN_PSSCR, r3
2261 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2262 li r4, LPCR_PECE_HVEE@higher
2265 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2269 std r0, HSTATE_SCRATCH0(r13)
2271 ld r0, HSTATE_SCRATCH0(r13)
2278 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
2287 /* get vcpu pointer */
2288 ld r4, HSTATE_KVM_VCPU(r13)
2290 /* Woken by external or decrementer interrupt */
2291 ld r1, HSTATE_HOST_R1(r13)
2293 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2294 addi r3, r4, VCPU_TB_RMINTR
2295 bl kvmhv_accumulate_time
2298 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2300 bl kvmppc_restore_tm
2301 END_FTR_SECTION_IFSET(CPU_FTR_TM)
2304 /* load up FP state */
2307 /* Restore guest decrementer */
2308 ld r3, VCPU_DEC_EXPIRES(r4)
2309 ld r5, HSTATE_KVM_VCORE(r13)
2310 ld r6, VCORE_TB_OFFSET(r5)
2311 add r3, r3, r6 /* convert host TB to guest TB value */
2317 ld r14, VCPU_GPR(R14)(r4)
2318 ld r15, VCPU_GPR(R15)(r4)
2319 ld r16, VCPU_GPR(R16)(r4)
2320 ld r17, VCPU_GPR(R17)(r4)
2321 ld r18, VCPU_GPR(R18)(r4)
2322 ld r19, VCPU_GPR(R19)(r4)
2323 ld r20, VCPU_GPR(R20)(r4)
2324 ld r21, VCPU_GPR(R21)(r4)
2325 ld r22, VCPU_GPR(R22)(r4)
2326 ld r23, VCPU_GPR(R23)(r4)
2327 ld r24, VCPU_GPR(R24)(r4)
2328 ld r25, VCPU_GPR(R25)(r4)
2329 ld r26, VCPU_GPR(R26)(r4)
2330 ld r27, VCPU_GPR(R27)(r4)
2331 ld r28, VCPU_GPR(R28)(r4)
2332 ld r29, VCPU_GPR(R29)(r4)
2333 ld r30, VCPU_GPR(R30)(r4)
2334 ld r31, VCPU_GPR(R31)(r4)
2336 /* Check the wake reason in SRR1 to see why we got here */
2337 bl kvmppc_check_wake_reason
2340 * Restore volatile registers since we could have called a
2341 * C routine in kvmppc_check_wake_reason
2343 * r3 tells us whether we need to return to the host or not.
2344 * WARNING: it gets checked further down;
2345 * do not modify r3 until that check is done.
2347 ld r4, HSTATE_KVM_VCPU(r13)
2349 /* clear our bit in vcore->napping_threads */
2350 34: ld r5,HSTATE_KVM_VCORE(r13)
2351 lbz r7,HSTATE_PTID(r13)
2354 addi r6,r5,VCORE_NAPPING_THREADS
2360 stb r0,HSTATE_NAPPING(r13)
2362 /* See if the wake reason saved in r3 means we need to exit */
2363 stw r12, VCPU_TRAP(r4)
2368 /* see if any other thread is already exiting */
2369 lwz r0,VCORE_ENTRY_EXIT(r5)
2373 b kvmppc_cede_reentry /* if not go back to guest */
2375 /* the case where we cede but have already been prodded */
2378 stb r0,VCPU_PRODDED(r3)
2379 sync /* order testing prodded vs. clearing ceded */
2380 stb r0,VCPU_CEDED(r3)
2384 /* we've ceded but we want to give control to the host */
2386 ld r9, HSTATE_KVM_VCPU(r13)
2389 /* Try to handle a machine check in real mode */
2390 machine_check_realmode:
2391 mr r3, r9 /* get vcpu pointer */
2392 bl kvmppc_realmode_machine_check
2394 ld r9, HSTATE_KVM_VCPU(r13)
2395 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2397 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through a
2398 * machine check interrupt (set HSRR0 to 0x200). For handled
2399 * (non-fatal) errors, just go back to guest execution with the current
2400 * HSRR0 instead of exiting the guest. This approach injects a
2401 * machine check into the guest for fatal errors, causing the guest to crash.
2403 * The old code used to return to the host for unhandled errors, which
2404 * caused the guest to hang with soft lockups inside the guest and
2405 * made it difficult to recover the guest instance.
2407 * If we receive a machine check with MSR[RI] = 0, deliver it to the
2408 * guest as a machine check, causing the guest to crash.
2410 ld r11, VCPU_MSR(r9)
2411 rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
2412 bne mc_cont /* if so, exit to host */
2413 andi. r10, r11, MSR_RI /* check for unrecoverable exception */
2414 beq 1f /* Deliver a machine check to guest */
2416 cmpdi r3, 0 /* Did we handle MCE ? */
2417 bne 2f /* Continue guest execution. */
2418 /* If not, deliver a machine check. SRR0/1 are already set */
2419 1: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
2420 bl kvmppc_msr_interrupt
2421 2: b fast_interrupt_c_return
2424 * Check the reason we woke from nap, and take appropriate action.
2426 * 0 if nothing needs to be done
2427 * 1 if something happened that needs to be handled by the host
2428 * -1 if there was a guest wakeup (IPI or msgsnd)
2429 * -2 if we handled a PCI passthrough interrupt (returned by
2430 * kvmppc_read_intr only)
2432 * Also sets r12 to the interrupt vector for any interrupt that needs
2433 * to be handled now by the host (0x500 for external interrupt), or zero.
2434 * Modifies all volatile registers (since it may call a C function).
2435 * This routine calls kvmppc_read_intr, a C function, if an external
2436 * interrupt is pending.
2438 kvmppc_check_wake_reason:
2441 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2443 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2444 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2445 cmpwi r6, 8 /* was it an external interrupt? */
2446 beq 7f /* if so, see what it was */
2449 cmpwi r6, 6 /* was it the decrementer? */
2452 cmpwi r6, 5 /* privileged doorbell? */
2454 cmpwi r6, 3 /* hypervisor doorbell? */
2456 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2457 cmpwi r6, 0xa /* Hypervisor maintenance ? */
2459 li r3, 1 /* anything else, return 1 */
2462 /* hypervisor doorbell */
2463 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
2466 * Clear the doorbell as we will invoke the handler
2467 * explicitly in the guest exit path.
2469 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2471 /* see if it's a host IPI */
2473 lbz r0, HSTATE_HOST_IPI(r13)
2476 /* if not, return -1 */
2480 /* Woken up due to Hypervisor maintenance interrupt */
2481 4: li r12, BOOK3S_INTERRUPT_HMI
2485 /* external interrupt - create a stack frame so we can call C */
2487 std r0, PPC_LR_STKOFF(r1)
2488 stdu r1, -PPC_MIN_STKFRM(r1)
2491 li r12, BOOK3S_INTERRUPT_EXTERNAL
2496 * A return code of 2 means a PCI passthrough interrupt, but
2497 * we need to return to the host to complete handling the
2498 * interrupt. The trap reason is expected in r12 by the guest exit code.
2501 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
2503 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
2504 addi r1, r1, PPC_MIN_STKFRM
2509 * Save away FP, VMX and VSX registers.
2511 * N.B. r30 and r31 are volatile across this function,
2512 * thus it is not callable from C.
2519 #ifdef CONFIG_ALTIVEC
2521 oris r8,r8,MSR_VEC@h
2522 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2526 oris r8,r8,MSR_VSX@h
2527 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2530 addi r3,r3,VCPU_FPRS
2532 #ifdef CONFIG_ALTIVEC
2534 addi r3,r31,VCPU_VRS
2536 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2538 mfspr r6,SPRN_VRSAVE
2539 stw r6,VCPU_VRSAVE(r31)
2544 * Load up FP, VMX and VSX registers
2546 * N.B. r30 and r31 are volatile across this function,
2547 * thus it is not callable from C.
2554 #ifdef CONFIG_ALTIVEC
2556 oris r8,r8,MSR_VEC@h
2557 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2561 oris r8,r8,MSR_VSX@h
2562 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2565 addi r3,r4,VCPU_FPRS
2567 #ifdef CONFIG_ALTIVEC
2569 addi r3,r31,VCPU_VRS
2571 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2573 lwz r7,VCPU_VRSAVE(r31)
2574 mtspr SPRN_VRSAVE,r7
2579 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2581 * Save transactional state and TM-related registers.
2582 * Called with r9 pointing to the vcpu struct.
2583 * This can modify all checkpointed registers, but
2584 * restores r1, r2 and r9 (vcpu pointer) before exit.
2588 std r0, PPC_LR_STKOFF(r1)
2593 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
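/* set the TM bit in the MSR image in r8 so that the treclaim and TM SPR accesses below are permitted */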
2597 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
2598 beq 1f /* TM not active in guest. */
2600 std r1, HSTATE_HOST_R1(r13)
2601 li r3, TM_CAUSE_KVM_RESCHED
2603 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
2607 /* All GPRs are volatile at this point. */
2610 /* Temporarily store r13 and r9 so we have some regs to play with */
2613 std r9, PACATMSCRATCH(r13)
2614 ld r9, HSTATE_KVM_VCPU(r13)
2616 /* Get a few more GPRs free. */
2617 std r29, VCPU_GPRS_TM(29)(r9)
2618 std r30, VCPU_GPRS_TM(30)(r9)
2619 std r31, VCPU_GPRS_TM(31)(r9)
2621 /* Save away PPR and DSCR soon so we don't run with user values. */
2624 mfspr r30, SPRN_DSCR
2625 ld r29, HSTATE_DSCR(r13)
2626 mtspr SPRN_DSCR, r29
2628 /* Save all but r9, r13 & r29-r31 */
2631 .if (reg != 9) && (reg != 13)
2632 std reg, VCPU_GPRS_TM(reg)(r9)
2636 /* ... now save r13 */
2638 std r4, VCPU_GPRS_TM(13)(r9)
2639 /* ... and save r9 */
2640 ld r4, PACATMSCRATCH(r13)
2641 std r4, VCPU_GPRS_TM(9)(r9)
2643 /* Reload stack pointer and TOC. */
2644 ld r1, HSTATE_HOST_R1(r13)
2647 /* Set MSR RI now we have r1 and r13 back. */
2651 /* Save away checkpointed SPRs. */
2652 std r31, VCPU_PPR_TM(r9)
2653 std r30, VCPU_DSCR_TM(r9)
2660 std r5, VCPU_LR_TM(r9)
2661 stw r6, VCPU_CR_TM(r9)
2662 std r7, VCPU_CTR_TM(r9)
2663 std r8, VCPU_AMR_TM(r9)
2664 std r10, VCPU_TAR_TM(r9)
2665 std r11, VCPU_XER_TM(r9)
2667 /* Restore r12 as trap number. */
2668 lwz r12, VCPU_TRAP(r9)
2671 addi r3, r9, VCPU_FPRS_TM
2673 addi r3, r9, VCPU_VRS_TM
2675 mfspr r6, SPRN_VRSAVE
2676 stw r6, VCPU_VRSAVE_TM(r9)
2679 * We need to save these SPRs after the treclaim so that the software
2680 * error code is recorded correctly in the TEXASR. Also the user may
2681 * change these outside of a transaction, so they must always be context switched.
2684 mfspr r5, SPRN_TFHAR
2685 mfspr r6, SPRN_TFIAR
2686 mfspr r7, SPRN_TEXASR
2687 std r5, VCPU_TFHAR(r9)
2688 std r6, VCPU_TFIAR(r9)
2689 std r7, VCPU_TEXASR(r9)
2691 ld r0, PPC_LR_STKOFF(r1)
2696 * Restore transactional state and TM-related registers.
2697 * Called with r4 pointing to the vcpu struct.
2698 * This potentially modifies all checkpointed registers.
2699 * It restores r1, r2, r4 from the PACA.
2703 std r0, PPC_LR_STKOFF(r1)
2705 /* Turn on TM/FP/VSX/VMX so we can restore them. */
2711 oris r5, r5, (MSR_VEC | MSR_VSX)@h
2715 * The user may change these outside of a transaction, so they must
2716 * always be context switched.
2718 ld r5, VCPU_TFHAR(r4)
2719 ld r6, VCPU_TFIAR(r4)
2720 ld r7, VCPU_TEXASR(r4)
2721 mtspr SPRN_TFHAR, r5
2722 mtspr SPRN_TFIAR, r6
2723 mtspr SPRN_TEXASR, r7
2726 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
2727 beqlr /* TM not active in guest */
2728 std r1, HSTATE_HOST_R1(r13)
2730 /* Make sure the failure summary is set, otherwise we'll program check
2731 * when we trechkpt. It's possible that this might not have been set
2732 * on a kvmppc_set_one_reg() call, but we shouldn't let this crash the host.
2735 oris r7, r7, (TEXASR_FS)@h
2736 mtspr SPRN_TEXASR, r7
2739 * We need to load up the checkpointed state for the guest.
2740 * We need to do this early as it will blow away any GPRs, VSRs and some SPRs.
2745 addi r3, r31, VCPU_FPRS_TM
2747 addi r3, r31, VCPU_VRS_TM
2750 lwz r7, VCPU_VRSAVE_TM(r4)
2751 mtspr SPRN_VRSAVE, r7
2753 ld r5, VCPU_LR_TM(r4)
2754 lwz r6, VCPU_CR_TM(r4)
2755 ld r7, VCPU_CTR_TM(r4)
2756 ld r8, VCPU_AMR_TM(r4)
2757 ld r9, VCPU_TAR_TM(r4)
2758 ld r10, VCPU_XER_TM(r4)
2767 * Load up PPR and DSCR values but don't put them in the actual SPRs
2768 * till the last moment to avoid running with userspace PPR and DSCR for too long.
2771 ld r29, VCPU_DSCR_TM(r4)
2772 ld r30, VCPU_PPR_TM(r4)
2774 std r2, PACATMSCRATCH(r13) /* Save TOC */
2776 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
2780 /* Load GPRs r0-r28 */
2783 ld reg, VCPU_GPRS_TM(reg)(r31)
2787 mtspr SPRN_DSCR, r29
2790 /* Load final GPRs */
2791 ld 29, VCPU_GPRS_TM(29)(r31)
2792 ld 30, VCPU_GPRS_TM(30)(r31)
2793 ld 31, VCPU_GPRS_TM(31)(r31)
2795 /* TM checkpointed state is now set up. All GPRs are now volatile. */
2798 /* Now let's get back the state we need. */
2801 ld r29, HSTATE_DSCR(r13)
2802 mtspr SPRN_DSCR, r29
2803 ld r4, HSTATE_KVM_VCPU(r13)
2804 ld r1, HSTATE_HOST_R1(r13)
2805 ld r2, PACATMSCRATCH(r13)
2807 /* Set the MSR RI since we have our registers back. */
2811 ld r0, PPC_LR_STKOFF(r1)
2817 * We come here if we get any exception or interrupt while we are
2818 * executing host real mode code while in guest MMU context.
2819 * For now just spin, but we should do something better.
2821 kvmppc_bad_host_intr:
2825 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
2826 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
2827 * r11 has the guest MSR value (in/out)
2828 * r9 has a vcpu pointer (in)
2829 * r0 is used as a scratch register
2831 kvmppc_msr_interrupt:
2832 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
2833 cmpwi r0, 2 /* Check if we are in transactional state.. */
2834 ld r11, VCPU_INTR_MSR(r9)
2836 /* ... if transactional, change to suspended */
2838 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
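/* insert r0 into the TS field of the interrupt MSR: a transactional guest state is carried over as suspended */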
2842 * This works around a hardware bug on POWER8E processors, where
2843 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
2844 * performance monitor interrupt. Instead, when we need to have
2845 * an interrupt pending, we have to arrange for a counter to overflow.
2849 mtspr SPRN_MMCR2, r3
2850 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
2851 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
2852 mtspr SPRN_MMCR0, r3
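/* PMXE enables the performance monitor alert/exception, FCECE freezes the counters when the alert occurs, and PMCjCE lets an overflow of PMC2-PMC6 raise it */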
2859 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2861 * Start timing an activity
2862 * r3 = pointer to time accumulation struct, r4 = vcpu
2865 ld r5, HSTATE_KVM_VCORE(r13)
2866 lbz r6, VCORE_IN_GUEST(r5)
2868 beq 5f /* if in guest, need to */
2869 ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
2872 std r3, VCPU_CUR_ACTIVITY(r4)
2873 std r5, VCPU_ACTIVITY_START(r4)
2877 * Accumulate time to one activity and start another.
2878 * r3 = pointer to new time accumulation struct, r4 = vcpu
2880 kvmhv_accumulate_time:
2881 ld r5, HSTATE_KVM_VCORE(r13)
2882 lbz r8, VCORE_IN_GUEST(r5)
2884 beq 4f /* if in guest, need to */
2885 ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
2886 4: ld r5, VCPU_CUR_ACTIVITY(r4)
2887 ld r6, VCPU_ACTIVITY_START(r4)
2888 std r3, VCPU_CUR_ACTIVITY(r4)
2891 std r7, VCPU_ACTIVITY_START(r4)
2895 ld r8, TAS_SEQCOUNT(r5)
2898 std r8, TAS_SEQCOUNT(r5)
2900 ld r7, TAS_TOTAL(r5)
2902 std r7, TAS_TOTAL(r5)
2908 3: std r3, TAS_MIN(r5)
2914 std r8, TAS_SEQCOUNT(r5)