// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Varun Sethi, <varun.sethi@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/e500.c,
 * by Yu Liu <yu.liu@freescale.com>.
 */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>

#include "booke.h"
#include "e500.h"
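/*
 * On e500mc the host signals a pending interrupt to a running guest
 * through the doorbell facility: msgsnd is tagged with the guest's
 * LPID plus the vcpu id, so only the hardware thread that is
 * currently running this vcpu takes the guest doorbell exception.
 */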
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
{
	enum ppc_dbell dbell_type;
	unsigned long tag;

	switch (type) {
	case INT_CLASS_NONCRIT:
		dbell_type = PPC_G_DBELL;
		break;
	case INT_CLASS_CRIT:
		dbell_type = PPC_G_DBELL_CRIT;
		break;
	case INT_CLASS_MC:
		dbell_type = PPC_G_DBELL_MC;
		break;
	default:
		WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
		return;
	}

	preempt_disable();
	tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
	mb();
	ppc_msgsnd(dbell_type, 0, tag);
	preempt_enable();
}
/*
 * gtlbe must not be mapped by more than one host tlb entry:
 * the tlbsx below finds, and the tlbwe invalidates, only a single
 * matching host entry.
 */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned int tid, ts;
	gva_t eaddr;
	u32 val;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	/* We search the host TLB to invalidate its shadow TLB entry */
	val = (tid << 16) | ts;
	eaddr = get_tlb_eaddr(gtlbe);

	local_irq_save(flags);

	mtspr(SPRN_MAS6, val);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));

	asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
	val = mfspr(SPRN_MAS1);
	if (val & MAS1_VALID) {
		mtspr(SPRN_MAS1, val & ~MAS1_VALID);
		asm volatile("tlbwe");
	}
	mtspr(SPRN_MAS5, 0);
	/* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
	mtspr(SPRN_MAS8, 0);
	isync();

	local_irq_restore(flags);
}
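/*
 * Invalidate all shadow TLB entries that belong to this guest in one
 * shot. tlbilxlpid invalidates by the LPID currently in MAS5, so MAS5
 * is pointed at the guest for the duration of the instruction.
 */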
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
	asm volatile("tlbilxlpid");
	mtspr(SPRN_MAS5, 0);
	local_irq_restore(flags);
}
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	vcpu->arch.pid = pid;
}
/* MSR changes need no MMU fixup on e500mc */
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
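/*
 * Shadow TLB entries are tagged with an LPID but are not flushed when
 * a vcpu migrates between physical cpus. Remember the last vcpu that
 * ran with each LPID on each cpu so vcpu_load can tell when entries
 * left in the TLB belong to a different vcpu and must be invalidated.
 */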
/* We use two lpids per VM */
static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_booke_vcpu_load(vcpu, cpu);

	mtspr(SPRN_LPID, get_lpid(vcpu));
	mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
	mtspr(SPRN_GPIR, vcpu->vcpu_id);
	mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
	vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
	vcpu->arch.epsc = vcpu->arch.eplc;
	mtspr(SPRN_EPLC, vcpu->arch.eplc);
	mtspr(SPRN_EPSC, vcpu->arch.epsc);

	mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
	mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
	mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
	mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
	mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
	mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
	mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);

	mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
	mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);

	mtspr(SPRN_GEPR, vcpu->arch.epr);
	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
	mtspr(SPRN_GESR, vcpu->arch.shared->esr);

	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
	    __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
		kvmppc_e500_tlbil_all(vcpu_e500);
		__this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
	}
}
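/*
 * While the vcpu runs, its guest-visible state lives in the GSPRGn,
 * GSRRn, GEPR, GDEAR and GESR registers, which the guest updates
 * without trapping; pull that state back into the software image
 * before the vcpu is scheduled out.
 */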
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.eplc = mfspr(SPRN_EPLC);
	vcpu->arch.epsc = mfspr(SPRN_EPSC);

	vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
	vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
	vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
	vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);

	vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
	vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);

	vcpu->arch.epr = mfspr(SPRN_GEPR);
	vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
	vcpu->arch.shared->esr = mfspr(SPRN_GESR);

	vcpu->arch.oldpir = mfspr(SPRN_PIR);

	kvmppc_booke_vcpu_put(vcpu);
}
int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
		r = 0;
	else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
		r = 0;
#ifdef CONFIG_ALTIVEC
	/*
	 * Since guests have the privilege to enable AltiVec, we need AltiVec
	 * support in the host to save/restore their context.
	 * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
	 * because it's cleared in the absence of CONFIG_ALTIVEC!
	 */
	else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
		r = 0;
#endif
	else
		r = -ENOTSUPP;

	return r;
}
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
				 SPRN_EPCR_DUVD;
#ifdef CONFIG_64BIT
	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;

	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500MC;

	return 0;
}
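/*
 * sregs is the older struct-based register interface: the FSL
 * implementation block carries SVR, HID0 and MCAR, and the shared
 * ivor_high slots carry the performance monitor and doorbell IVORs.
 */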
static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
			       KVM_SREGS_E_PC;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	kvmppc_get_sregs_e500_tlb(vcpu, sregs);

	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];

	return kvmppc_get_sregs_ivor(vcpu, sregs);
}
static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PC) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
			sregs->u.e.ivor_high[4];
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
			sregs->u.e.ivor_high[5];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}
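/*
 * ONE_REG accessors: SPRG9 is the only register handled here
 * directly; everything else is forwarded to the e500 TLB code.
 */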
static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
				     union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_SPRG9:
		*val = get_reg_val(id, vcpu->arch.sprg9);
		break;
	default:
		r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
	}

	return r;
}
static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
				     union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_SPRG9:
		vcpu->arch.sprg9 = set_reg_val(id, *val);
		break;
	default:
		r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
	}

	return r;
}
static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	int err;

	BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
	vcpu_e500 = to_e500(vcpu);

	/* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
	vcpu->arch.oldpir = 0xffffffff;

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		return err;

	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared) {
		err = -ENOMEM;
		goto uninit_tlb;
	}

	return 0;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
	return err;
}
static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
}
static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
{
	int lpid;

	lpid = kvmppc_alloc_lpid();
	if (lpid < 0)
		return lpid;

	/*
	 * Use two lpids per VM on cores with two threads like e6500. Use
	 * even numbers to speed up vcpu lpid computation with consecutive
	 * lpids per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5,
	 * and so on.
	 */
	if (threads_per_core == 2)
		lpid <<= 1;

	kvm->arch.lpid = lpid;
	return 0;
}
static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
	int lpid = kvm->arch.lpid;

	/* Undo the lpid <<= 1 done at VM init on dual-threaded cores */
	if (threads_per_core == 2)
		lpid >>= 1;

	kvmppc_free_lpid(lpid);
}
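/*
 * Callback table through which the generic PPC KVM code drives this
 * backend; the emulate_* hooks are shared with the e500v2 code in
 * e500_emulate.c.
 */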
static struct kvmppc_ops kvm_ops_e500mc = {
	.get_sregs = kvmppc_core_get_sregs_e500mc,
	.set_sregs = kvmppc_core_set_sregs_e500mc,
	.get_one_reg = kvmppc_get_one_reg_e500mc,
	.set_one_reg = kvmppc_set_one_reg_e500mc,
	.vcpu_load   = kvmppc_core_vcpu_load_e500mc,
	.vcpu_put    = kvmppc_core_vcpu_put_e500mc,
	.vcpu_create = kvmppc_core_vcpu_create_e500mc,
	.vcpu_free   = kvmppc_core_vcpu_free_e500mc,
	.init_vm = kvmppc_core_init_vm_e500mc,
	.destroy_vm = kvmppc_core_destroy_vm_e500mc,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};
static int __init kvmppc_e500mc_init(void)
{
	int r;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	/*
	 * Use two lpids per VM on dual threaded processors like e6500
	 * to work around the lack of tlb write conditional instruction.
	 * Expose half the number of available hardware lpids to the lpid
	 * allocator.
	 */
	kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
	kvmppc_claim_lpid(0); /* host */

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_e500mc.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_e500mc;

err_out:
	return r;
}
static void __exit kvmppc_e500mc_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvm_exit();
}
module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");