/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exits",       VCPU_STAT(sum_exits) },
        { "mmio",        VCPU_STAT(mmio_exits) },
        { "sig",         VCPU_STAT(signal_exits) },
        { "sysc",        VCPU_STAT(syscall_exits) },
        { "inst_emu",    VCPU_STAT(emulated_inst_exits) },
        { "dec",         VCPU_STAT(dec_exits) },
        { "ext_intr",    VCPU_STAT(ext_intr_exits) },
        { "queue_intr",  VCPU_STAT(queue_intr) },
        { "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
        { "halt_poll_fail_ns",    VCPU_STAT(halt_poll_fail_ns) },
        { "halt_wait_ns",         VCPU_STAT(halt_wait_ns) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_attempted_poll",  VCPU_STAT(halt_attempted_poll) },
        { "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
        { "halt_poll_invalid",    VCPU_STAT(halt_poll_invalid) },
        { "halt_wakeup",          VCPU_STAT(halt_wakeup) },
        { "pf_storage",  VCPU_STAT(pf_storage) },
        { "sp_storage",  VCPU_STAT(sp_storage) },
        { "pf_instruc",  VCPU_STAT(pf_instruc) },
        { "sp_instruc",  VCPU_STAT(sp_instruc) },
        { "ld",          VCPU_STAT(ld) },
        { "ld_slow",     VCPU_STAT(ld_slow) },
        { "st",          VCPU_STAT(st) },
        { "st_slow",     VCPU_STAT(st_slow) },
        { "pthru_all",     VCPU_STAT(pthru_all) },
        { "pthru_host",    VCPU_STAT(pthru_host) },
        { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
        { NULL }
};
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
                ulong pc = kvmppc_get_pc(vcpu);
                if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
                        kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
                vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
        }
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
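
/*
 * PR guests may relocate their interrupt vectors via HIOR
 * (to_book3s(vcpu)->hior); with HV KVM the hardware delivers at the
 * architected offsets, so no software bias is applied.
 */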
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
        if (!is_kvmppc_hv_enabled(vcpu->kvm))
                return to_book3s(vcpu)->hior;
        return 0;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
                        unsigned long pending_now, unsigned long old_pending)
{
        if (is_kvmppc_hv_enabled(vcpu->kvm))
                return;
        if (pending_now)
                kvmppc_set_int_pending(vcpu, 1);
        else if (old_pending)
                kvmppc_set_int_pending(vcpu, 0);
}
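
/*
 * A "critical section" here is the paravirtual convention where the
 * guest stores a marker (read back via kvmppc_get_critical()) equal to
 * its r1 while it must not be interrupted; maskable interrupt delivery
 * is deferred while that marker matches and the guest is in
 * supervisor mode.
 */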
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
        ulong crit_raw;
        ulong crit_r1;
        bool crit;

        if (is_kvmppc_hv_enabled(vcpu->kvm))
                return false;

        crit_raw = kvmppc_get_critical(vcpu);
        crit_r1 = kvmppc_get_gpr(vcpu, 1);

        /* Truncate crit indicators in 32 bit mode */
        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

        return crit;
}
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
        kvmppc_unfixup_split_real(vcpu);
        kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
        kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
        kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
        vcpu->arch.mmu.reset_msr(vcpu);
}
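
/*
 * Map an architected interrupt vector offset (0x100, 0x200, ...) to the
 * internal delivery priority used as a bit index in
 * vcpu->arch.pending_exceptions.
 */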
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
        unsigned int prio;

        switch (vec) {
        case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;   break;
        case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;  break;
        case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;   break;
        case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;   break;
        case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;   break;
        case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;   break;
        case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;       break;
        case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
        case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;      break;
        case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;        break;
        case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;     break;
        case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;    break;
        case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;        break;
        case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;          break;
        case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;        break;
        case 0xf40: prio = BOOK3S_IRQPRIO_VSX;            break;
        case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;    break;
        default:    prio = BOOK3S_IRQPRIO_MAX;            break;
        }

        return prio;
}
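
/*
 * Pending interrupts are tracked as a priority bitmap in
 * vcpu->arch.pending_exceptions; queueing and dequeueing just set or
 * clear the corresponding priority bit, then refresh the
 * guest-visible int_pending indication.
 */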
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                                   unsigned int vec)
{
        unsigned long old_pending = vcpu->arch.pending_exceptions;

        clear_bit(kvmppc_book3s_vec2irqprio(vec),
                  &vcpu->arch.pending_exceptions);

        kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
                                  old_pending);
}
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
        vcpu->stat.queue_intr++;

        set_bit(kvmppc_book3s_vec2irqprio(vec),
                &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
        printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
        /* might as well deliver this straight away */
        kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

        kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
        kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
                                    ulong flags)
{
        kvmppc_set_dar(vcpu, dar);
        kvmppc_set_dsisr(vcpu, flags);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);      /* used by kvm_hv */
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
        u64 msr = kvmppc_get_msr(vcpu);
        msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
        msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
        kvmppc_set_msr_fast(vcpu, msr);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
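
/*
 * Try to deliver one pending interrupt: decrementer and external
 * interrupts are gated on MSR_EE and on the guest not being in a
 * critical section; everything else is delivered unconditionally.
 * Returns whether the interrupt was actually injected.
 */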
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
                                         unsigned int priority)
{
        int deliver = 1;
        int vec = 0;
        bool crit = kvmppc_critical_section(vcpu);

        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_DECREMENTER;
                break;
        case BOOK3S_IRQPRIO_EXTERNAL:
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
                vec = BOOK3S_INTERRUPT_EXTERNAL;
                break;
        case BOOK3S_IRQPRIO_SYSTEM_RESET:
                vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
                break;
        case BOOK3S_IRQPRIO_MACHINE_CHECK:
                vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
                break;
        case BOOK3S_IRQPRIO_DATA_STORAGE:
                vec = BOOK3S_INTERRUPT_DATA_STORAGE;
                break;
        case BOOK3S_IRQPRIO_INST_STORAGE:
                vec = BOOK3S_INTERRUPT_INST_STORAGE;
                break;
        case BOOK3S_IRQPRIO_DATA_SEGMENT:
                vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_INST_SEGMENT:
                vec = BOOK3S_INTERRUPT_INST_SEGMENT;
                break;
        case BOOK3S_IRQPRIO_ALIGNMENT:
                vec = BOOK3S_INTERRUPT_ALIGNMENT;
                break;
        case BOOK3S_IRQPRIO_PROGRAM:
                vec = BOOK3S_INTERRUPT_PROGRAM;
                break;
        case BOOK3S_IRQPRIO_VSX:
                vec = BOOK3S_INTERRUPT_VSX;
                break;
        case BOOK3S_IRQPRIO_ALTIVEC:
                vec = BOOK3S_INTERRUPT_ALTIVEC;
                break;
        case BOOK3S_IRQPRIO_FP_UNAVAIL:
                vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
                break;
        case BOOK3S_IRQPRIO_SYSCALL:
                vec = BOOK3S_INTERRUPT_SYSCALL;
                break;
        case BOOK3S_IRQPRIO_DEBUG:
                vec = BOOK3S_INTERRUPT_TRACE;
                break;
        case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
                vec = BOOK3S_INTERRUPT_PERFMON;
                break;
        case BOOK3S_IRQPRIO_FAC_UNAVAIL:
                vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
                break;
        default:
                deliver = 0;
                printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
                break;
        }

#if 0
        printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

        if (deliver)
                kvmppc_inject_interrupt(vcpu, vec, 0);

        return deliver;
}
/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
        switch (priority) {
        case BOOK3S_IRQPRIO_DECREMENTER:
                /* DEC interrupts get cleared by mtdec */
                return false;
        case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
                /* External interrupts get cleared by userspace */
                return false;
        }

        return true;
}
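
/*
 * Walk the pending-exception bitmap from the highest priority (lowest
 * set bit) down, injecting at most one deliverable interrupt per call
 * and clearing its bit unless it is cleared elsewhere (mtdec,
 * userspace).
 */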
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
        unsigned int priority;

#ifdef EXIT_DEBUG
        if (vcpu->arch.pending_exceptions)
                printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
        priority = __ffs(*pending);
        while (priority < BOOK3S_IRQPRIO_MAX) {
                if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
                    clear_irqprio(vcpu, priority)) {
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
                        break;
                }

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        kvmppc_update_int_pending(vcpu, *pending, old_pending);

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
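
/*
 * Translate a guest physical address to a host pfn. The guest-visible
 * "magic page" (the shared register page) is special-cased so that it
 * resolves to the host page backing vcpu->arch.shared rather than to
 * guest memory.
 */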
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
                        bool *writable)
{
        ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
        gfn_t gfn = gpa >> PAGE_SHIFT;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        /* Magic page override */
        gpa &= ~0xFFFULL;
        if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
                ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
                kvm_pfn_t pfn;

                pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
                get_page(pfn_to_page(pfn));
                if (writable)
                        *writable = true;
                return pfn;
        }

        return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
                 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
        bool data = (xlid == XLATE_DATA);
        bool iswrite = (xlrw == XLATE_WRITE);
        int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
        int r;

        if (relocated) {
                r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
        } else {
                pte->eaddr = eaddr;
                pte->raddr = eaddr & KVM_PAM;
                pte->vpage = VSID_REAL | eaddr >> 12;
                pte->may_read = true;
                pte->may_write = true;
                pte->may_execute = true;
                r = 0;

                if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
                    !data) {
                        if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                            ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
                                pte->raddr &= ~SPLIT_HACK_MASK;
                }
        }

        return r;
}
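
/*
 * Re-read the guest instruction at the current PC (stepped back one
 * instruction for system calls, where the PC already points past the
 * sc). Returns EMULATE_AGAIN if the load could not complete.
 */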
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
                          u32 *inst)
{
        ulong pc = kvmppc_get_pc(vcpu);
        int r;

        if (type == INST_SC)
                pc -= 4;

        r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
        if (r == EMULATE_DONE)
                return r;
        else
                return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        vcpu_load(vcpu);
        ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
        vcpu_put(vcpu);

        return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        vcpu_load(vcpu);
        ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
        vcpu_put(vcpu);

        return ret;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu_load(vcpu);

        regs->pc = kvmppc_get_pc(vcpu);
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = kvmppc_get_ctr(vcpu);
        regs->lr = kvmppc_get_lr(vcpu);
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = kvmppc_get_msr(vcpu);
        regs->srr0 = kvmppc_get_srr0(vcpu);
        regs->srr1 = kvmppc_get_srr1(vcpu);
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = kvmppc_get_sprg0(vcpu);
        regs->sprg1 = kvmppc_get_sprg1(vcpu);
        regs->sprg2 = kvmppc_get_sprg2(vcpu);
        regs->sprg3 = kvmppc_get_sprg3(vcpu);
        regs->sprg4 = kvmppc_get_sprg4(vcpu);
        regs->sprg5 = kvmppc_get_sprg5(vcpu);
        regs->sprg6 = kvmppc_get_sprg6(vcpu);
        regs->sprg7 = kvmppc_get_sprg7(vcpu);

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        vcpu_put(vcpu);
        return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu_load(vcpu);

        kvmppc_set_pc(vcpu, regs->pc);
        kvmppc_set_cr(vcpu, regs->cr);
        kvmppc_set_ctr(vcpu, regs->ctr);
        kvmppc_set_lr(vcpu, regs->lr);
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        kvmppc_set_srr0(vcpu, regs->srr0);
        kvmppc_set_srr1(vcpu, regs->srr1);
        kvmppc_set_sprg0(vcpu, regs->sprg0);
        kvmppc_set_sprg1(vcpu, regs->sprg1);
        kvmppc_set_sprg2(vcpu, regs->sprg2);
        kvmppc_set_sprg3(vcpu, regs->sprg3);
        kvmppc_set_sprg4(vcpu, regs->sprg4);
        kvmppc_set_sprg5(vcpu, regs->sprg5);
        kvmppc_set_sprg6(vcpu, regs->sprg6);
        kvmppc_set_sprg7(vcpu, regs->sprg7);

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        vcpu_put(vcpu);
        return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
                        union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
        if (r == -EINVAL) {
                r = 0;
                switch (id) {
                case KVM_REG_PPC_DAR:
                        *val = get_reg_val(id, kvmppc_get_dar(vcpu));
                        break;
                case KVM_REG_PPC_DSISR:
                        *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
                        break;
                case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                        i = id - KVM_REG_PPC_FPR0;
                        *val = get_reg_val(id, VCPU_FPR(vcpu, i));
                        break;
                case KVM_REG_PPC_FPSCR:
                        *val = get_reg_val(id, vcpu->arch.fp.fpscr);
                        break;
#ifdef CONFIG_VSX
                case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
                        if (cpu_has_feature(CPU_FTR_VSX)) {
                                i = id - KVM_REG_PPC_VSR0;
                                val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
                                val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
                        } else {
                                r = -ENXIO;
                        }
                        break;
#endif /* CONFIG_VSX */
                case KVM_REG_PPC_DEBUG_INST:
                        *val = get_reg_val(id, INS_TW);
                        break;
#ifdef CONFIG_KVM_XICS
                case KVM_REG_PPC_ICP_STATE:
                        if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
                                r = -ENXIO;
                                break;
                        }
                        if (xive_enabled())
                                *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
                        else
                                *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
                        break;
#endif /* CONFIG_KVM_XICS */
                case KVM_REG_PPC_FSCR:
                        *val = get_reg_val(id, vcpu->arch.fscr);
                        break;
                case KVM_REG_PPC_TAR:
                        *val = get_reg_val(id, vcpu->arch.tar);
                        break;
                case KVM_REG_PPC_EBBHR:
                        *val = get_reg_val(id, vcpu->arch.ebbhr);
                        break;
                case KVM_REG_PPC_EBBRR:
                        *val = get_reg_val(id, vcpu->arch.ebbrr);
                        break;
                case KVM_REG_PPC_BESCR:
                        *val = get_reg_val(id, vcpu->arch.bescr);
                        break;
                case KVM_REG_PPC_IC:
                        *val = get_reg_val(id, vcpu->arch.ic);
                        break;
                default:
                        r = -EINVAL;
                        break;
                }
        }

        return r;
}
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
                        union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
        if (r == -EINVAL) {
                r = 0;
                switch (id) {
                case KVM_REG_PPC_DAR:
                        kvmppc_set_dar(vcpu, set_reg_val(id, *val));
                        break;
                case KVM_REG_PPC_DSISR:
                        kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
                        break;
                case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                        i = id - KVM_REG_PPC_FPR0;
                        VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_FPSCR:
                        vcpu->arch.fp.fpscr = set_reg_val(id, *val);
                        break;
#ifdef CONFIG_VSX
                case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
                        if (cpu_has_feature(CPU_FTR_VSX)) {
                                i = id - KVM_REG_PPC_VSR0;
                                vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
                                vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
                        } else {
                                r = -ENXIO;
                        }
                        break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
                case KVM_REG_PPC_ICP_STATE:
                        if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
                                r = -ENXIO;
                                break;
                        }
                        if (xive_enabled())
                                r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
                        else
                                r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
                        break;
#endif /* CONFIG_KVM_XICS */
                case KVM_REG_PPC_FSCR:
                        vcpu->arch.fscr = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_TAR:
                        vcpu->arch.tar = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_EBBHR:
                        vcpu->arch.ebbhr = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_EBBRR:
                        vcpu->arch.ebbrr = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_BESCR:
                        vcpu->arch.bescr = set_reg_val(id, *val);
                        break;
                case KVM_REG_PPC_IC:
                        vcpu->arch.ic = set_reg_val(id, *val);
                        break;
                default:
                        r = -EINVAL;
                        break;
                }
        }

        return r;
}
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
        vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        vcpu_load(vcpu);
        vcpu->guest_debug = dbg->control;
        vcpu_put(vcpu);
        return 0;
}
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
        kvmppc_core_queue_dec(vcpu);
        kvm_vcpu_kick(vcpu);
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
        kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                               unsigned long npages)
{
        return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem)
{
        return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new)
{
        kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

        return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
        kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
        kvmppc_rtas_tokens_free(kvm);
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
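
/*
 * H_LOGICAL_CI_LOAD/H_LOGICAL_CI_STORE: cache-inhibited load/store
 * hypercalls, backed by the in-kernel MMIO bus. Only power-of-two
 * sizes that an in-kernel device handles can succeed; anything else
 * returns H_TOO_HARD so the caller can fall back (typically to
 * userspace emulation).
 */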
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
        unsigned long size = kvmppc_get_gpr(vcpu, 4);
        unsigned long addr = kvmppc_get_gpr(vcpu, 5);
        u64 buf;
        int srcu_idx;
        int ret;

        if (!is_power_of_2(size) || (size > sizeof(buf)))
                return H_TOO_HARD;

        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
        if (ret != 0)
                return H_TOO_HARD;

        switch (size) {
        case 1:
                kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
                break;

        case 2:
                kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
                break;

        case 4:
                kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
                break;

        case 8:
                kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
                break;

        default:
                BUG();
        }

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
        unsigned long size = kvmppc_get_gpr(vcpu, 4);
        unsigned long addr = kvmppc_get_gpr(vcpu, 5);
        unsigned long val = kvmppc_get_gpr(vcpu, 6);
        u64 buf;
        int srcu_idx;
        int ret;

        switch (size) {
        case 1:
                *(u8 *)&buf = val;
                break;

        case 2:
                *(__be16 *)&buf = cpu_to_be16(val);
                break;

        case 4:
                *(__be32 *)&buf = cpu_to_be32(val);
                break;

        case 8:
                *(__be64 *)&buf = cpu_to_be64(val);
                break;

        default:
                return H_TOO_HARD;
        }

        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
        if (ret != 0)
                return H_TOO_HARD;

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
int kvmppc_core_check_processor_compat(void)
{
        /*
         * We always return 0 for book3s. We check
         * for compatibility while loading the HV
         * module, so we rely on load_module() to
         * verify that.
         */
        return 0;
}
int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
        return kvm->arch.kvm_ops->hcall_implemented(hcall);
}
#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
{
        if (xive_enabled())
                return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
                                           line_status);
        else
                return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
                                           line_status);
}
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
                              struct kvm *kvm, int irq_source_id,
                              int level, bool line_status)
{
        return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
                           level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
                                 struct kvm *kvm, int irq_source_id, int level,
                                 bool line_status)
{
        return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}
int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
        entries->gsi = gsi;
        entries->type = KVM_IRQ_ROUTING_IRQCHIP;
        entries->set = kvmppc_book3s_set_irq;
        entries->irqchip.irqchip = 0;
        entries->irqchip.pin = gsi;
        return 1;
}
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        return pin;
}

#endif /* CONFIG_KVM_XICS */
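
/*
 * Module init: register with the generic KVM core, then the PR backend
 * on 32-bit builds, and the XICS or XIVE interrupt-controller device
 * ops where configured.
 */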
static int kvmppc_book3s_init(void)
{
        int r;

        r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (r)
                return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
        if (xive_enabled()) {
                kvmppc_xive_init_module();
                kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
        } else
#endif
                kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif

        return r;
}
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
        if (xive_enabled())
                kvmppc_xive_exit_module();
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kvmppc_book3s_exit_pr();
#endif
        kvm_exit();
}
module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif