/*
 * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Varun Sethi, <varun.sethi@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/e500.c,
 * by Yu Liu <yu.liu@freescale.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>

#include "booke.h"
#include "e500.h"
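
/*
 * Inject a pending interrupt of the given class into the guest by sending
 * the matching guest doorbell message (msgsnd).  The doorbell tag selects
 * the guest's LPID and the target vcpu id, which vcpu_load mirrors into
 * GPIR.
 */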
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
{
	enum ppc_dbell dbell_type;
	unsigned long tag;

	switch (type) {
	case INT_CLASS_NONCRIT:
		dbell_type = PPC_G_DBELL;
		break;
	case INT_CLASS_CRIT:
		dbell_type = PPC_G_DBELL_CRIT;
		break;
	case INT_CLASS_MC:
		dbell_type = PPC_G_DBELL_MC;
		break;
	default:
		WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
		return;
	}

	tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
	ppc_msgsnd(dbell_type, 0, tag);
}
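
/*
 * Invalidate the host (shadow) TLB entry that maps a guest TLB entry:
 * MAS6 is loaded with the guest entry's PID/AS and MAS5 with SGS plus the
 * guest's LPID so that tlbsx searches guest translations; a matching entry
 * is rewritten with its valid bit cleared.
 */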
/* gtlbe must not be mapped by more than one host tlb entry */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned int tid, ts;
	gva_t eaddr;
	u32 val, lpid;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);
	lpid = vcpu_e500->vcpu.kvm->arch.lpid;

	/* We search the host TLB to invalidate its shadow TLB entry */
	val = (tid << 16) | ts;
	eaddr = get_tlb_eaddr(gtlbe);

	local_irq_save(flags);

	mtspr(SPRN_MAS6, val);
	mtspr(SPRN_MAS5, MAS5_SGS | lpid);

	asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
	val = mfspr(SPRN_MAS1);
	if (val & MAS1_VALID) {
		mtspr(SPRN_MAS1, val & ~MAS1_VALID);
		asm volatile("tlbwe");
	}
	mtspr(SPRN_MAS5, 0);
	/* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
	mtspr(SPRN_MAS8, 0);
	isync();

	local_irq_restore(flags);
}
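
/*
 * Invalidate every shadow TLB entry belonging to this guest on the current
 * CPU: tlbilxlpid removes the entries matching the LPID programmed into
 * MAS5.
 */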
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
	asm volatile("tlbilxlpid");
	mtspr(SPRN_MAS5, 0);
	local_irq_restore(flags);
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	vcpu->arch.pid = pid;
}
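
/*
 * Nothing to do here on e500mc: guest translations are tagged with the
 * LPID and guest-state bit in hardware, so no shadow-MMU update is needed
 * when the guest MSR changes (unlike e500v2).
 */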
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
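
/*
 * Per physical CPU, track the last vcpu that ran under each LPID so that
 * vcpu_load can detect when another vcpu has used this LPID on this CPU
 * and flush the stale shadow TLB entries.
 */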
static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
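
/*
 * Load this vcpu's context onto the current CPU: select its LPID, program
 * the shadow EPCR/MSRP/EPLC/EPSC, mirror the guest-visible SPRs into the
 * GSPRGn/GSRRn/GDEAR/GESR/GEPR registers, and flush this LPID's shadow TLB
 * entries if the vcpu migrated or another vcpu last used the LPID here.
 */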
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_booke_vcpu_load(vcpu, cpu);

	mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
	mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
	mtspr(SPRN_GPIR, vcpu->vcpu_id);
	mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
	mtspr(SPRN_EPLC, vcpu->arch.eplc);
	mtspr(SPRN_EPSC, vcpu->arch.epsc);

	mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
	mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
	mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
	mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
	mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
	mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
	mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);

	mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
	mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);

	mtspr(SPRN_GEPR, vcpu->arch.epr);
	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
	mtspr(SPRN_GESR, vcpu->arch.shared->esr);

	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
	    __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
		kvmppc_e500_tlbil_all(vcpu_e500);
		__get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
	}

	kvmppc_load_guest_fp(vcpu);
}
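
/*
 * Counterpart of vcpu_load: read the guest-visible state back out of the
 * hardware registers and remember which physical CPU (PIR) last ran this
 * vcpu, so the next vcpu_load can detect migration.
 */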
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.eplc = mfspr(SPRN_EPLC);
	vcpu->arch.epsc = mfspr(SPRN_EPSC);

	vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
	vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
	vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
	vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);

	vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
	vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);

	vcpu->arch.epr = mfspr(SPRN_GEPR);
	vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
	vcpu->arch.shared->esr = mfspr(SPRN_GESR);

	vcpu->arch.oldpir = mfspr(SPRN_PIR);

	kvmppc_booke_vcpu_put(vcpu);
}

int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
		r = 0;
	else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
				 SPRN_EPCR_DUVD;
#ifdef CONFIG_64BIT
	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
	vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
	vcpu->arch.epsc = vcpu->arch.eplc;

	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500MC;

	return 0;
}
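
/*
 * KVM_GET_SREGS back end: report the Freescale implementation block
 * (SVR/HID0/MCAR), the supported feature sets, the TLB configuration and
 * the high IVORs used for the performance monitor and doorbell interrupts.
 */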
static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
			       KVM_SREGS_E_PC;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	kvmppc_get_sregs_e500_tlb(vcpu, sregs);

	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];

	return kvmppc_get_sregs_ivor(vcpu, sregs);
}

static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PC) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
			sregs->u.e.ivor_high[4];
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
			sregs->u.e.ivor_high[5];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}
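
/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG back ends: SPRG9 is handled here, every
 * other register id is passed on to the shared e500 TLB code.
 */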
static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
				     union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_SPRG9:
		*val = get_reg_val(id, vcpu->arch.sprg9);
		break;
	default:
		r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
	}

	return r;
}

static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
				     union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_SPRG9:
		vcpu->arch.sprg9 = set_reg_val(id, *val);
		break;
	default:
		r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
	}

	return r;
}
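
/*
 * Allocate a new vcpu: the vcpu/e500 control block, its TLB tracking state
 * and the register page shared with the guest.  On failure the partially
 * constructed state is unwound in reverse order.
 */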
static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
						       unsigned int id)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu_e500) {
		err = -ENOMEM;
		goto out;
	}
	vcpu = &vcpu_e500->vcpu;

	/* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
	vcpu->arch.oldpir = 0xffffffff;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		goto uninit_vcpu;

	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared) {
		err = -ENOMEM;
		goto uninit_tlb;
	}

	return vcpu;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}
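
/*
 * Each VM is assigned its own LPID, which the hardware uses to tag the
 * guest's TLB entries; the LPID is returned to the allocator when the VM
 * is destroyed.
 */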
static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
{
	int lpid;

	lpid = kvmppc_alloc_lpid();
	if (lpid < 0)
		return lpid;

	kvm->arch.lpid = lpid;
	return 0;
}

static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
}

static struct kvmppc_ops kvm_ops_e500mc = {
	.get_sregs = kvmppc_core_get_sregs_e500mc,
	.set_sregs = kvmppc_core_set_sregs_e500mc,
	.get_one_reg = kvmppc_get_one_reg_e500mc,
	.set_one_reg = kvmppc_set_one_reg_e500mc,
	.vcpu_load = kvmppc_core_vcpu_load_e500mc,
	.vcpu_put = kvmppc_core_vcpu_put_e500mc,
	.vcpu_create = kvmppc_core_vcpu_create_e500mc,
	.vcpu_free = kvmppc_core_vcpu_free_e500mc,
	.mmu_destroy = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500mc,
	.destroy_vm = kvmppc_core_destroy_vm_e500mc,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};
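
/*
 * Module init: bring up the common booke support, size the LPID allocator
 * and reserve LPID 0 for the host, register with the KVM core and install
 * kvm_ops_e500mc.
 */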
static int __init kvmppc_e500mc_init(void)
{
	int r;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	kvmppc_init_lpid(64);
	kvmppc_claim_lpid(0); /* host */

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_e500mc.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_e500mc;

err_out:
	return r;
}

static void __exit kvmppc_e500mc_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}

module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");