/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}
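
/*
 * Map an interrupt vector offset to the bit kept for it in
 * vcpu->arch.pending_exceptions.  Numerically smaller BOOK3S_IRQPRIO_*
 * values are found first by __ffs() in kvmppc_core_prepare_to_enter()
 * and are therefore delivered first.
 */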
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
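
/*
 * Try to deliver one queued interrupt.  Maskable sources (decrementer,
 * external) are gated on MSR_EE and on not being in a guest critical
 * section; the remaining priorities are delivered unconditionally.
 * Returns whether the interrupt was actually injected.
 */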
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}
/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}
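
/*
 * Walk the pending_exceptions bitmap in priority order (lowest bit
 * first) and stop after the first interrupt that was both delivered
 * and is of a type that is cleared on delivery (see clear_irqprio()
 * above); sources that were not deliverable stay queued for next time.
 */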
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
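
/*
 * Translate a guest physical address to a host pfn.  The one special
 * case is the paravirt magic page: an access to magic_page_pa is backed
 * directly by the kernel page holding vcpu->arch.shared rather than by
 * guest memory, so the guest and host always see the same shared state.
 */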
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
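
/*
 * With relocation off (the relevant MSR_IR/MSR_DR bit clear for the
 * access type) there is no MMU lookup: the effective address is used
 * as the real address directly, modulo the split-real fixup below.
 */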
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
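
/*
 * Most of the entry points from here on are thin wrappers that dispatch
 * through kvm->arch.kvm_ops, i.e. to whichever of the PR or HV backend
 * implementations was selected when the VM was created.
 */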
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_VTB:
			*val = get_reg_val(id, vcpu->arch.vtb);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_VTB:
			vcpu->arch.vtb = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}
void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}
int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
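
/*
 * H_LOGICAL_CI_LOAD/H_LOGICAL_CI_STORE handle cache-inhibited (MMIO)
 * accesses on behalf of the guest.  The byte swaps below reflect that
 * the value on the bus is big-endian, while the hcall argument and
 * return value travel in a native-endian GPR.
 */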
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}
*kvm
, unsigned long hcall
)
921 return kvm
->arch
.kvm_ops
->hcall_implemented(hcall
);
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}
940 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
941 kvmppc_book3s_exit_pr();
946 module_init(kvmppc_book3s_init
);
947 module_exit(kvmppc_book3s_exit
);
949 /* On 32bit this is our one and only kernel module */
950 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
951 MODULE_ALIAS_MISCDEV(KVM_MINOR
);
952 MODULE_ALIAS("devname:kvm");