/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
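/*
 * Each entry in debugfs_entries below pairs a debugfs file name with the
 * offset of a counter inside struct kvm (VM_STAT) or struct kvm_vcpu
 * (VCPU_STAT); KVM's generic debugfs code reads the counters through
 * these (offset, type) pairs.
 */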
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
};
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					      vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}
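/*
 * Note: on non-HV booke the guest never runs with its own MSR; the host
 * loads a "shadow" MSR on its behalf.  The sync helpers above reconcile
 * the guest-visible MSR bits (vcpu->arch.shared->msr) with the bits we
 * are actually willing to hand the hardware (vcpu->arch.shadow_msr), so
 * SPE and FP state can be switched lazily.
 */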
/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
}
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}
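/*
 * Pending interrupts are tracked as priority bits in
 * vcpu->arch.pending_exceptions.  The queue/dequeue helpers above only
 * set or clear those bits (plus any queued DEAR/ESR values); actual
 * delivery into the guest happens later, in
 * kvmppc_booke_irqprio_deliver(), once the MSR masks and critical-section
 * checks allow it.
 */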
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}
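/*
 * With CONFIG_KVM_BOOKE_HV the guest's SRR0/1, DEAR and ESR live in real
 * guest SPRs (GSRR0/1, GDEAR, GESR) that the hardware switches for us;
 * without it they are kept in the shared page (or in vcpu->arch) and are
 * emulated entirely in software.
 */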
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		int_class = INT_CLASS_CRIT;
		break;
	}
	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
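/*
 * Delivery mimics what the hardware would do: save PC/MSR into the save
 * register pair for the interrupt class, keep only the MSR bits in
 * msr_mask, and redirect the guest to IVPR | IVOR[priority].  For example
 * (numbers purely illustrative), with IVPR = 0x55550000 and IVOR4 =
 * 0x0500, a deliverable external interrupt sends the guest to 0x55550500
 * with MSR[EE] cleared.
 */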
/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
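/*
 * Worked example (illustrative numbers only): if the programmed period
 * selects TB bit 1ULL << 30 and the timebase runs at 500 MHz, the next
 * 0->1 toggle of that bit is at most two bit periods away (up to about
 * 2.1e9 ticks, roughly four seconds); do_div() then converts the tick
 * count to jiffies, rounding up so the host timer never fires early.
 */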
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}
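/*
 * The cmpxchg() loop above advances TSR without taking a lock, mirroring
 * the hardware's three-step watchdog: the first expiry sets TSR[ENW], the
 * second sets TSR[WIS] (and queues a watchdog interrupt for the guest),
 * and the third is "final"; if TCR[WRC] asks for an action, we punt to
 * userspace via KVM_REQ_WATCHDOG rather than resetting anything ourselves.
 */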
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	return r;
}
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
#ifdef CONFIG_PPC_FPU
	u64 fpr[32];
	u64 fpscr;
	int fpexc_mode;
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;
		goto out;
	}
	kvmppc_lazy_ee_enable();

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
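/*
 * The host's interrupt handlers expect a struct pt_regs, but when an
 * interrupt is taken while the guest is running we arrive here from the
 * KVM exit path rather than from the usual low-level entry code, so a
 * minimal pt_regs is faked up from the current stack pointer, LR, MSR
 * and a just-captured instruction address before calling the handler.
 */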
/*
 * Interrupts that need to be handled by the host are re-driven here by
 * calling the corresponding host handler directly, in a similar (but not
 * identical) way to how the low-level handlers would invoke them
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	}
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	trace_kvm_exit(exit_nr, vcpu);

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;
#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST_NV;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif
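	/*
	 * A guest "sc" with KVM_SC_MAGIC_R0 in r0 is a KVM paravirtual
	 * hypercall and is handled in the host by kvmppc_kvm_pv(); any
	 * other system call is simply reflected back into the guest as a
	 * normal syscall interrupt.
	 */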
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}
	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}
	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		} else {
			kvmppc_lazy_ee_enable();
		}
	}

	return r;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}
static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		u32 old_tsr = vcpu->arch.tsr;

		vcpu->arch.tsr = sregs->u.e.tsr;

		if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
			arm_next_watchdog(vcpu);

		update_timer_ints(vcpu);
	}

	return 0;
}
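/*
 * Userspace-driven TSR updates go through the same paths as guest-driven
 * ones: flipping TSR[ENW]/TSR[WIS] re-arms (or stops) the emulated
 * watchdog, and update_timer_ints() re-evaluates which decrementer or
 * watchdog interrupts should now be pending for the guest.
 */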
static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}
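/*
 * The ivor_low[] indices follow the architected IVOR numbering
 * (IVOR0 = critical input ... IVOR15 = debug), which is why the array
 * positions above do not match the internal BOOKE_IRQPRIO_* ordering.
 */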
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
	case KVM_REG_PPC_IAC2:
	case KVM_REG_PPC_IAC3:
	case KVM_REG_PPC_IAC4: {
		int iac = reg->id - KVM_REG_PPC_IAC1;
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &vcpu->arch.dbg_reg.iac[iac], sizeof(u64));
		break;
	}
	case KVM_REG_PPC_DAC1:
	case KVM_REG_PPC_DAC2: {
		int dac = reg->id - KVM_REG_PPC_DAC1;
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
		break;
#endif
	default:
		break;
	}

	return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
	case KVM_REG_PPC_IAC2:
	case KVM_REG_PPC_IAC3:
	case KVM_REG_PPC_IAC4: {
		int iac = reg->id - KVM_REG_PPC_IAC1;
		r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac],
				   (u64 __user *)(long)reg->addr, sizeof(u64));
		break;
	}
	case KVM_REG_PPC_DAC1:
	case KVM_REG_PPC_DAC2: {
		int dac = reg->id - KVM_REG_PPC_DAC1;
		r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac],
				   (u64 __user *)(long)reg->addr, sizeof(u64));
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr;
		r = get_user(new_epcr, (u32 __user *)(long)reg->addr);
		if (r == 0)
			kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	default:
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      struct kvm_memory_slot old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
}
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
	return 0;
}
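/*
 * IVPR supplies the upper 16 bits of each exception vector address and
 * the IVORs supply the low bits, so pointing IVPR at a 64KB-aligned block
 * and copying one small handler stub per host IVOR offset lets the host
 * keep its own IVOR values while guests are running.
 */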
void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
}