/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
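/* Per-vcpu exit and fault statistics, exported through debugfs. */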
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}
void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}
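/*
 * PR guests take interrupts relative to HIOR; HV guests use the
 * architected vector locations, so no offset is applied there.
 */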
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}
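/*
 * A PR guest signals a critical section by storing r1 into the shared
 * (magic) page's "critical" field; while that value matches r1 and the
 * guest runs in supervisor mode, maskable interrupts are held back.
 */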
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = vcpu->arch.shared->critical;
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	return crit;
}
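/*
 * Deliver an interrupt into the guest: stash PC and MSR in SRR0/SRR1,
 * branch to the vector (offset by HIOR where applicable) and let the
 * MMU callback compute the interrupt-time MSR.
 */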
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
	vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
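/*
 * Try to deliver one pending exception.  Returns 1 when the interrupt was
 * injected, 0 when it has to stay pending (MSR_EE clear, guest in a
 * critical section, or unknown priority).
 */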
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}
/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}
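/*
 * Scan the pending exception bitmap in priority order and try to deliver
 * exceptions to the guest, clearing the ones that do not remain pending;
 * afterwards mirror the pending state into the shared page.
 */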
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
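/*
 * Translate a guest frame number to a host pfn.  The in-kernel shared
 * (magic) page overrides whatever memslot would otherwise back that gfn.
 */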
pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	if (unlikely(mp_pa) &&
	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
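/*
 * Translate an effective address through the guest MMU when relocation is
 * enabled, otherwise use the real-mode identity mapping.
 */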
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			bool iswrite, struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}
static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}
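/* Store to a guest effective address, falling back to MMIO emulation. */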
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, *eaddr, data, true, &pte))
		return -ENOENT;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
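/* Load from a guest effective address, falling back to MMIO emulation. */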
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = *eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, *eaddr, data, false, &pte))
		goto nopte;

	*eaddr = pte.raddr;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto mmio;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto mmio;
	}

	return EMULATE_DONE;

nopte:
	return -ENOENT;
mmio:
	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}
int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
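/*
 * ONE_REG get: the HV/PR backend is asked first; registers it does not
 * handle (it returns -EINVAL) are serviced by the generic code below.
 */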
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			val = get_reg_val(reg->id, vcpu->arch.shared->dar);
			break;
		case KVM_REG_PPC_DSISR:
			val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST: {
			u32 opcode = INS_TW;
			r = copy_to_user((u32 __user *)(long)reg->addr,
					 &opcode, sizeof(u32));
			break;
		}
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		default:
			r = -EINVAL;
			break;
		}
	}
	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
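/*
 * ONE_REG set: mirror of the get path above; the backend gets first
 * refusal, then the generic book3s registers are handled here.
 */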
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;
	long int i;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
		case KVM_REG_PPC_DAR:
			vcpu->arch.shared->dar = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_DSISR:
			vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = reg->id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
			break;
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				long int i = reg->id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(reg->id, val));
			break;
#endif /* CONFIG_KVM_XICS */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
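/*
 * Most of the remaining entry points simply forward to the active backend
 * (HV or PR) through kvm->arch.kvm_ops.
 */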
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->age_hva(kvm, hva);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}
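/*
 * VM-wide setup: initialize the SPAPR TCE table and RTAS token lists
 * (64-bit only) before handing off to the backend's init_vm.
 */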
int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * module.
	 */
	return 0;
}
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}
static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);
/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif