// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */

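/*
 * Exit and halt-polling counters exported through debugfs (typically
 * under /sys/kernel/debug/kvm/); VCPU_STAT entries name counters in a
 * vcpu's stat area, VM_STAT entries name counters in struct kvm.
 */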
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("exits", sum_exits),
	VCPU_STAT("mmio", mmio_exits),
	VCPU_STAT("sig", signal_exits),
	VCPU_STAT("sysc", syscall_exits),
	VCPU_STAT("inst_emu", emulated_inst_exits),
	VCPU_STAT("dec", dec_exits),
	VCPU_STAT("ext_intr", ext_intr_exits),
	VCPU_STAT("queue_intr", queue_intr),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VCPU_STAT("halt_wait_ns", halt_wait_ns),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_successful_wait", halt_successful_wait),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("pf_storage", pf_storage),
	VCPU_STAT("sp_storage", sp_storage),
	VCPU_STAT("pf_instruc", pf_instruc),
	VCPU_STAT("sp_instruc", sp_instruc),
	VCPU_STAT("ld", ld),
	VCPU_STAT("ld_slow", ld_slow),
	VCPU_STAT("st", st),
	VCPU_STAT("st_slow", st_slow),
	VCPU_STAT("pthru_all", pthru_all),
	VCPU_STAT("pthru_host", pthru_host),
	VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
	VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
	VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
	{ NULL }
};

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;

	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

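/*
 * PR guests can flag a "critical section" by making the shared-page
 * critical field (read via kvmppc_get_critical()) equal to r1 while in
 * supervisor mode; interrupt delivery below is held off while that
 * condition holds.  HV guests never use this, hence the early bail-out.
 */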
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

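/*
 * Almost everything below dispatches through kvm->arch.kvm_ops, the
 * per-VM backend vtable, so the same entry points serve both the PR
 * and HV flavours of Book3S KVM.
 */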
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}

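/*
 * Map an interrupt vector offset (e.g. 0x300, data storage) to the
 * BOOK3S_IRQPRIO_* bit used to track it in
 * vcpu->arch.pending_exceptions; unknown vectors map to
 * BOOK3S_IRQPRIO_MAX.
 */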
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);

static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}

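/*
 * Walk the pending_exceptions bitmap in priority order (lowest bit
 * first) and inject whatever is currently deliverable; a priority is
 * dropped from the bitmap only when clear_irqprio() agrees it should
 * be.
 */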
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

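/*
 * Translate a guest physical address to a host pfn.  The paravirt
 * magic page is special-cased: it is backed by the vcpu's shared page
 * in host kernel memory rather than by a memslot, so it is resolved
 * with virt_to_phys() instead of gfn_to_pfn_prot().
 */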
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

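/*
 * Translate an effective address through the guest MMU when
 * relocation is on, or synthesize a 1:1 real-mode mapping when the
 * relevant MSR_IR/MSR_DR bit is clear.
 */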
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
			  enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

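/*
 * ONE_REG backend for KVM_GET_ONE_REG (kvmppc_set_one_reg() below is
 * the mirror image): the PR/HV implementation gets first pick via
 * kvm_ops->get_one_reg(), and any register it rejects with -EINVAL is
 * handled by the common cases here.
 */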
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm->arch.kvm_ops->free_memslot(slot);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
							change);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
	return 0;
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices which are not directly freed by the
	 * device 'release' method
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
	kfree(kvm->arch.xics_device);
	kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}

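/*
 * H_LOGICAL_CI_LOAD and H_LOGICAL_CI_STORE implement the PAPR
 * cache-inhibited load/store hypercalls by forwarding the access to
 * the in-kernel MMIO bus; unsupported sizes, or addresses with no
 * in-kernel device behind them, fail with H_TOO_HARD.
 */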
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		WARN_ON_ONCE(1);
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

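/*
 * IRQ routing glue for the in-kernel interrupt controllers: requests
 * are steered to the XIVE backend when the host uses XIVE, otherwise
 * to the emulated XICS.
 */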
#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */

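/*
 * Module bring-up/teardown: register with generic KVM, initialize the
 * PR backend when it is built into this module (32-bit), and register
 * device ops for the interrupt controller variants (XIVE native,
 * XICS-on-XIVE, or emulated XICS) that the platform supports.
 */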
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported()) {
			kvmppc_xive_native_init_module();
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
		}
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif

	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xics_on_xive()) {
		kvmppc_xive_exit_module();
		kvmppc_xive_native_exit_module();
	}
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif