// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"

#include "booke.h"
#include "e500.h"
struct id {
        unsigned long val;
        struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
        struct id id[2][NUM_TIDS][2];
};
/*
 * This table provides the reversed mappings of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
        struct id *entry[NUM_TIDS];
};
static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
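/*
 * Note on validity: a shadow ID mapping is only trusted when both tables
 * agree -- the vcpu's idt entry records the sid, and the core's
 * pcpu_sids.entry[sid] points back at that very idt entry (see
 * local_sid_lookup() below).  A stale mapping (left over from another
 * core, or wiped by local_sid_destroy_all()) simply fails this
 * cross-check, so no cross-core synchronization is needed.
 */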
/*
 * Allocate a free shadow id and set up a valid sid mapping in the given entry.
 * A mapping is only valid when vcpu_id_table and pcpu_id_table match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
        unsigned long sid;
        int ret = -1;

        sid = __this_cpu_inc_return(pcpu_last_used_sid);
        if (sid < NUM_TIDS) {
                __this_cpu_write(pcpu_sids.entry[sid], entry);
                entry->val = sid;
                entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
                ret = sid;
        }

        /*
         * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
         * the caller will invalidate everything and start over.
         *
         * sid > NUM_TIDS indicates a race, which we disable preemption to
         * avoid.
         */
        WARN_ON(sid > NUM_TIDS);

        return ret;
}
/*
 * Check if the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
        if (entry && entry->val != 0 &&
            __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
            entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
                return entry->val;
        return -1;
}
/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
        __this_cpu_write(pcpu_last_used_sid, 0);
        memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
}
static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
        return vcpu_e500->idt;
}
static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->idt);
        vcpu_e500->idt = NULL;
}
/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest non-zero PID,
 * and PID1 to keep the shadow of the guest zero PID,
 * so that a guest tlbe with TID=0 can be matched at any time. */
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        preempt_disable();
        vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
                        get_cur_as(&vcpu_e500->vcpu),
                        get_cur_pid(&vcpu_e500->vcpu),
                        get_cur_pr(&vcpu_e500->vcpu), 1);
        vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
                        get_cur_as(&vcpu_e500->vcpu), 0,
                        get_cur_pr(&vcpu_e500->vcpu), 1);
        preempt_enable();
}
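/*
 * For illustration: if the guest currently runs with AS=0, PID=5, PR=0,
 * shadow_pid is the sid for (0, 5, 0) and shadow_pid1 the sid for
 * (0, 0, 0).  Host TLB entries built from guest tlbes with TID=5 are
 * tagged with the former, and entries built from TID=0 ("global") tlbes
 * with the latter, so both stay matchable while PID=5 is current.
 */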
/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

        /* Update shadow pid when mappings are changed */
        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
                        struct kvmppc_vcpu_e500 *vcpu_e500,
                        int as, int pid, int pr)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;

        BUG_ON(pid >= NUM_TIDS);

        idt->id[as][pid][pr].val = 0;
        idt->id[as][pid][pr].pentry = NULL;

        /* Update shadow pid when mappings are changed */
        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}
/*
 * Map guest (vcpu,AS,ID,PR) to a physical core shadow id.
 * This function first looks up whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
                                 unsigned int as, unsigned int gid,
                                 unsigned int pr, int avoid_recursion)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;
        int sid;

        BUG_ON(gid >= NUM_TIDS);

        sid = local_sid_lookup(&idt->id[as][gid][pr]);

        while (sid <= 0) {
                /* No mapping yet */
                sid = local_sid_setup_one(&idt->id[as][gid][pr]);
                if (sid <= 0) {
                        _tlbil_all();
                        local_sid_destroy_all();
                }

                /* Update shadow pid when mappings are changed */
                if (!avoid_recursion)
                        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
        }

        return sid;
}
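/*
 * kvmppc_e500_recalc_shadow_pid() passes avoid_recursion = 1 above so a
 * table flush inside kvmppc_e500_get_sid() does not recurse back into the
 * recalc path; other callers pass 0 and get the shadow PIDs refreshed if
 * the mapping tables had to be rebuilt.
 */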
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
                                      struct kvm_book3e_206_tlb_entry *gtlbe)
{
        return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
                                   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        if (vcpu->arch.pid != pid) {
                vcpu_e500->pid[0] = vcpu->arch.pid = pid;
                kvmppc_e500_recalc_shadow_pid(vcpu_e500);
        }
}
/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
                           struct kvm_book3e_206_tlb_entry *gtlbe)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;
        unsigned int pr, tid, ts;
        int pid;
        u32 val, eaddr;
        unsigned long flags;

        ts = get_tlb_ts(gtlbe);
        tid = get_tlb_tid(gtlbe);

        preempt_disable();
        /* One guest ID may be mapped to two shadow IDs */
        for (pr = 0; pr < 2; pr++) {
                /*
                 * The shadow PID can have a valid mapping on at most one
                 * host CPU.  In the common case, it will be valid on this
                 * CPU, in which case we do a local invalidation of the
                 * specific address.
                 *
                 * If the shadow PID is not valid on the current host CPU,
                 * we invalidate the entire shadow PID.
                 */
                pid = local_sid_lookup(&idt->id[ts][tid][pr]);
                if (pid <= 0) {
                        kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
                        continue;
                }
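                /*
                 * (The search below sets MAS6[SAS] because, in this model,
                 * the guest runs in the host's address space 1, so its
                 * shadow TLB entries live in AS1.)
                 */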
                /*
                 * The guest is invalidating a 4K entry which is in a PID
                 * that has a valid shadow mapping on this host CPU.  We
                 * search the host TLB to invalidate its shadow TLB entry,
                 * similar to __tlbil_va except that we need to look in AS1.
                 */
                val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
                eaddr = get_tlb_eaddr(gtlbe);

                local_irq_save(flags);

                mtspr(SPRN_MAS6, val);
                asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
                val = mfspr(SPRN_MAS1);
                if (val & MAS1_VALID) {
                        mtspr(SPRN_MAS1, val & ~MAS1_VALID);
                        asm volatile("tlbwe");
                }

                local_irq_restore(flags);
        }

        preempt_enable();
}
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kvmppc_e500_id_table_reset_all(vcpu_e500);
}
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
        /* Recalc shadow pid since MSR changes */
        kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}
static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
        kvmppc_booke_vcpu_load(vcpu, cpu);

        /* Shadow PID may be expired on local core */
        kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}
static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
        if (vcpu->arch.shadow_msr & MSR_SPE)
                kvmppc_vcpu_disable_spe(vcpu);
#endif

        kvmppc_booke_vcpu_put(vcpu);
}
int kvmppc_core_check_processor_compat(void)
{
        if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
                return 0;

        return -ENOTSUPP;
}
static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct kvm_book3e_206_tlb_entry *tlbe;

        /* Insert large initial mapping for guest. */
        tlbe = get_entry(vcpu_e500, 1, 0);
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
        tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

        /* 4K map for serial output. Used by kernel wrapper. */
        tlbe = get_entry(vcpu_e500, 1, 1);
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
        tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}
int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        kvmppc_e500_tlb_setup(vcpu_e500);

        vcpu->arch.pvr = mfspr(SPRN_PVR);
        vcpu_e500->svr = mfspr(SPRN_SVR);

        vcpu->arch.cpu_type = KVM_CPU_E500V2;

        return 0;
}
static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
                                      struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
                               KVM_SREGS_E_PM;
        sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

        sregs->u.e.impl.fsl.features = 0;
        sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
        sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
        sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

        sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
        sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
        sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
        sregs->u.e.ivor_high[3] =
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

        kvmppc_get_sregs_ivor(vcpu, sregs);
        kvmppc_get_sregs_e500_tlb(vcpu, sregs);
        return 0;
}
static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
                                      struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int ret;

        if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
                vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
                vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
                vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
        }

        ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
        if (ret < 0)
                return ret;

        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        if (sregs->u.e.features & KVM_SREGS_E_SPE) {
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
                        sregs->u.e.ivor_high[0];
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
                        sregs->u.e.ivor_high[1];
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
                        sregs->u.e.ivor_high[2];
        }

        if (sregs->u.e.features & KVM_SREGS_E_PM) {
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
                        sregs->u.e.ivor_high[3];
        }

        return kvmppc_set_sregs_ivor(vcpu, sregs);
}
static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
                                   union kvmppc_one_reg *val)
{
        int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);

        return r;
}
static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
                                   union kvmppc_one_reg *val)
{
        int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);

        return r;
}
static int kvmppc_core_vcpu_create_e500(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500;
        int err;

        BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
        vcpu_e500 = to_e500(vcpu);

        if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
                return -ENOMEM;

        err = kvmppc_e500_tlb_init(vcpu_e500);
        if (err)
                goto uninit_id;

        vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!vcpu->arch.shared) {
                err = -ENOMEM;
                goto uninit_tlb;
        }

        return 0;

uninit_tlb:
        kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
        kvmppc_e500_id_table_free(vcpu_e500);
        return err;
}
static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        free_page((unsigned long)vcpu->arch.shared);
        kvmppc_e500_tlb_uninit(vcpu_e500);
        kvmppc_e500_id_table_free(vcpu_e500);
}
static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
        return 0;
}

static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}
static struct kvmppc_ops kvm_ops_e500 = {
        .get_sregs = kvmppc_core_get_sregs_e500,
        .set_sregs = kvmppc_core_set_sregs_e500,
        .get_one_reg = kvmppc_get_one_reg_e500,
        .set_one_reg = kvmppc_set_one_reg_e500,
        .vcpu_load   = kvmppc_core_vcpu_load_e500,
        .vcpu_put    = kvmppc_core_vcpu_put_e500,
        .vcpu_create = kvmppc_core_vcpu_create_e500,
        .vcpu_free   = kvmppc_core_vcpu_free_e500,
        .init_vm = kvmppc_core_init_vm_e500,
        .destroy_vm = kvmppc_core_destroy_vm_e500,
        .emulate_op = kvmppc_core_emulate_op_e500,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};
static int __init kvmppc_e500_init(void)
{
        int r, i;
        unsigned long ivor[3];
        /* Process remaining handlers above the generic first 16 */
        unsigned long *handler = &kvmppc_booke_handler_addr[16];
        unsigned long handler_len;
        unsigned long max_ivor = 0;

        r = kvmppc_core_check_processor_compat();
        if (r)
                return r;

        r = kvmppc_booke_init();
        if (r)
                return r;

        /* copy extra E500 exception handlers */
        ivor[0] = mfspr(SPRN_IVOR32);
        ivor[1] = mfspr(SPRN_IVOR33);
        ivor[2] = mfspr(SPRN_IVOR34);
        for (i = 0; i < 3; i++) {
                if (ivor[i] > ivor[max_ivor])
                        max_ivor = i;

                handler_len = handler[i + 1] - handler[i];
                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       (void *)handler[i], handler_len);
        }
        handler_len = handler[max_ivor + 1] - handler[max_ivor];
        flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
                           ivor[max_ivor] + handler_len);

        r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
        if (r)
                return r;

        kvm_ops_e500.owner = THIS_MODULE;
        kvmppc_pr_ops = &kvm_ops_e500;

        return 0;
}
static void __exit kvmppc_e500_exit(void)
{
        kvmppc_pr_ops = NULL;
        kvmppc_booke_exit();
}
module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");