/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"
#include "booke.h"
#include "e500.h"

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256
/*
 * This table provides mappings from:
 * (guestAS, guestTID, guestPR) --> ID of physical cpu
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};
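
/*
 * For example: a guest thread running in AS0 with PID 5 in supervisor mode
 * maps through id[0][5][0], while the same guest PID in user mode uses the
 * separate slot id[0][5][1], so supervisor and user translations never
 * share a shadow ID.
 */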

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/*
 * This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255].
 */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

/*
 * Allocate a free shadow id and set up a valid sid mapping in the given entry.
 * A mapping is only valid when vcpu_id_table and pcpu_id_table match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = ++(__get_cpu_var(pcpu_last_used_sid));
	if (sid < NUM_TIDS) {
		__get_cpu_var(pcpu_sids).entry[sid] = entry;
		entry->val = sid;
		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}
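
/*
 * Sketch of the allocation behavior implied above: sids are handed out by a
 * simple per-core counter, so after local_sid_destroy_all() the first three
 * setups on a core receive sids 1, 2 and 3.  Nothing is recycled until the
 * counter reaches NUM_TIDS, at which point the caller flushes everything
 * and the counter restarts from zero.
 */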

/*
 * Check if the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
		return entry->val;
	return -1;
}
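
/*
 * The two-sided check above is what makes stale entries harmless: after
 * local_sid_destroy_all() clears pcpu_sids, or after the vcpu migrates to
 * another core, the vcpu-side entry still holds its old val/pentry, but the
 * pcpu-side array no longer points back at it, so the lookup fails and a
 * fresh sid gets allocated.
 */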

/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__get_cpu_var(pcpu_last_used_sid) = 0;
	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
	vcpu_e500->idt = NULL;
}

/*
 * Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest's non-zero PID,
 * and use PID1 to keep the shadow of the guest's zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time.
 */
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}
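
/*
 * Note: shadow_pid and shadow_pid1 are presumably what the low-level guest
 * entry path loads into SPRN_PID and SPRN_PID1 (that code lives outside
 * this file), which is why both must be recalculated whenever a sid
 * mapping changes.
 */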

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map a guest (vcpu, AS, ID, PR) tuple to a physical core shadow id.
 * This function first looks up whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}
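
/*
 * The retry loop above runs at most twice: if local_sid_setup_one() fails,
 * the whole host TLB is flushed and the per-core sid space is reset, so the
 * second setup attempt starts from a fresh counter and must succeed.
 */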

unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe)
{
	return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts;
	int pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case we do a local invalidation of the
		 * specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * we invalidate the entire shadow PID.
		 */
		pid = local_sid_lookup(&idt->id[ts][tid][pr]);
		if (pid <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a 4K entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search the host TLB to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kvmppc_e500_id_table_reset_all(vcpu_e500);
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_booke_vcpu_load(vcpu, cpu);

	/* Shadow PID may be expired on local core */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
	if (vcpu->arch.shadow_msr & MSR_SPE)
		kvmppc_vcpu_disable_spe(vcpu);
#endif

	kvmppc_booke_vcpu_put(vcpu);
}

int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}

static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_book3e_206_tlb_entry *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = get_entry(vcpu_e500, 1, 0);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = get_entry(vcpu_e500, 1, 1);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}
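
/*
 * Note on the magic constant above: 0xe0004500 is presumably the UART
 * address expected by the guest kernel wrapper; masking with 0xFFFFF000
 * yields the 4K page base, so the entry maps that page 1:1 as guarded,
 * cache-inhibited I/O.
 */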

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_e500_tlb_setup(vcpu_e500);

	/* Registers init */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500V2;

	return 0;
}

void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
			       KVM_SREGS_E_PM;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

	kvmppc_get_sregs_ivor(vcpu, sregs);
	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
}

int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
			sregs->u.e.ivor_high[0];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
			sregs->u.e.ivor_high[1];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
			sregs->u.e.ivor_high[2];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu_e500) {
		err = -ENOMEM;
		goto out;
	}

	vcpu = &vcpu_e500->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = -ENOMEM;
	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto uninit_vcpu;

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		goto uninit_id;

	err = -ENOMEM;
	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared)
		goto uninit_tlb;

	return vcpu;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
	kvmppc_e500_id_table_free(vcpu_e500);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvmppc_e500_id_table_free(vcpu_e500);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}

static int __init kvmppc_e500_init(void)
{
	int r, i;
	unsigned long ivor[3];
	unsigned long max_ivor = 0;

	r = kvmppc_core_check_processor_compat();
	if (r)
		return r;

	r = kvmppc_booke_init();
	if (r)
		return r;

	/* copy extra E500 exception handlers */
	ivor[0] = mfspr(SPRN_IVOR32);
	ivor[1] = mfspr(SPRN_IVOR33);
	ivor[2] = mfspr(SPRN_IVOR34);
	for (i = 0; i < 3; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + (i + 16) * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   max_ivor + kvmppc_handler_len);

	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
}
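
/*
 * The (i + 16) offset above relies on the handler table layout: SPRN_IVOR32,
 * SPRN_IVOR33 and SPRN_IVOR34 (the SPE exceptions) presumably correspond to
 * entries 16, 17 and 18 of the common booke handler stubs, each
 * kvmppc_handler_len bytes long, following the sixteen base IVOR handlers.
 */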

static void __exit kvmppc_e500_exit(void)
{
	kvmppc_booke_exit();
}

module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);