// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__

/* #define EXIT_DEBUG */

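/*
 * Each entry below becomes a file in the KVM debugfs directory; the
 * VM_STAT()/VCPU_STAT() macros above expand to the (offset, type[, mode])
 * values that the generic kvm_stats_debugfs_item code expects.
 */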
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",	VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",		VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",		VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll",	VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll",	VCPU_STAT(halt_attempted_poll) },
	{ "halt_successful_wait",	VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid",		VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup",		VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",       VCPU_STAT(pthru_all) },
	{ "pthru_host",      VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
	{ "largepages_2M",   VM_STAT(num_2M_pages, .mode = 0444) },
	{ "largepages_1G",   VM_STAT(num_1G_pages, .mode = 0444) },
	{ NULL }
};

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

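/*
 * A PR guest can enter a "critical section" by mirroring r1 into the
 * critical field of the shared (magic) page; while that value still
 * matches r1 and the guest runs in supervisor mode, interrupt delivery
 * is held off.  HV guests never use this mechanism, hence the early
 * bail-out below.
 */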
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}

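/*
 * Map an architected interrupt vector offset (e.g. 0x300, data storage)
 * to the Book3S delivery priority used as a bit index in
 * vcpu->arch.pending_exceptions.
 */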
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

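/*
 * The kvmppc_core_queue_* helpers below inject their exception through
 * the subarch inject_interrupt hook right away instead of going via the
 * pending_exceptions bitmap.
 */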
void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * platforms in future.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);

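/*
 * Try to deliver one pending exception class: maskable sources
 * (decrementer, external) are gated on MSR_EE and on not being in a
 * guest critical section; everything else is delivered unconditionally.
 * Returns nonzero if the interrupt was actually injected.
 */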
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}

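/*
 * Scan pending_exceptions from the highest priority (lowest bit) and
 * inject at most one exception per guest entry; the bit is cleared only
 * for one-shot sources, as decided by clear_irqprio() above.
 */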
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

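/*
 * Translate a guest physical address to a host pfn.  The magic (shared)
 * page is backed by ordinary host kernel memory, so it is resolved with
 * virt_to_phys() instead of a memslot lookup.
 */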
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

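/*
 * With translation off (MSR IR/DR clear for the relevant access type),
 * guest effective addresses map 1:1 onto guest real addresses within
 * KVM_PAM, so a synthetic PTE is filled in instead of calling into the
 * guest MMU model.
 */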
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

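/*
 * ONE_REG accessors.  The subarch get/set hook in kvm_ops is tried
 * first; -EINVAL from it means "register not handled there", and the
 * common Book3S registers are then serviced below.  Illustrative
 * userspace access via the ONE_REG ioctl (vcpu_fd and dar_val are
 * hypothetical names):
 *
 *	__u64 dar_val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_DAR,
 *		.addr = (__u64)(unsigned long)&dar_val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */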
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

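/*
 * The VM-scope operations from here down are thin wrappers that
 * dispatch to the subarch (PR or HV) implementation through the per-VM
 * kvm_ops table.
 */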
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE devices which are not directly freed by the
	 * device 'release' method
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
#endif /* CONFIG_KVM_XICS */
}

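/*
 * H_LOGICAL_CI_LOAD/H_LOGICAL_CI_STORE perform cache-inhibited
 * loads/stores on behalf of the guest; the access is routed through the
 * KVM MMIO bus so that in-kernel devices can satisfy it.
 */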
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	__be64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_BIG;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	__be64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_BIG;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */

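/*
 * Module init: after the common kvm_init(), register the in-kernel
 * irqchip device ops matching the host interrupt controller mode
 * (XIVE-backed when xics_on_xive(), otherwise the emulated XICS).
 */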
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported()) {
			kvmppc_xive_native_init_module();
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
		}
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif

	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xics_on_xive()) {
		kvmppc_xive_exit_module();
		kvmppc_xive_native_exit_module();
	}
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif