/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"
#include "booke.h"
#include "e500.h"

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

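/*
 * A shadow ID mapping is live only while the two tables agree: the
 * vcpu-side entry records the ID in ->val and caches a pointer to its
 * pcpu_sids slot in ->pentry, while pcpu_sids.entry[ID] points back at
 * that same entry.  For example, if this vcpu's (AS=0, TID=3, PR=0) is
 * mapped to shadow ID 5 on this core, then idt->id[0][3][0].val == 5
 * and pcpu_sids.entry[5] == &idt->id[0][3][0].  Either side can break
 * the agreement on its own, which is what makes the cheap wholesale
 * invalidation in local_sid_destroy_all() possible.
 */
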
/* This variable keeps the last used shadow ID on the local core.
 * The valid range of shadow ID is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

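/* Shadow ID 0 is never handed out: local_sid_lookup() treats val == 0 as
 * "no mapping", so a freshly zeroed table is invalid by construction. */
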
/*
 * Allocate a free shadow id and set up a valid sid mapping in the given
 * entry. A mapping is only valid when vcpu_id_table and pcpu_id_table match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = __this_cpu_inc_return(pcpu_last_used_sid);
	if (sid < NUM_TIDS) {
		__this_cpu_write(pcpu_sids.entry[sid], entry);
		entry->val = sid;
		entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
		ret = sid;
	}

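	/*
	 * Note that __this_cpu_inc_return() yields 1 on its first call (the
	 * per-cpu counter starts at 0), so IDs handed out here fall in
	 * [1..NUM_TIDS-1] and 0 stays reserved as the invalid value.
	 */
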
	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check if the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
	    entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
		return entry->val;
	return -1;
}

/* Invalidate all id mappings on the local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__this_cpu_write(pcpu_last_used_sid, 0);
	memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
	vcpu_e500->idt = NULL;
}

/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest non-zero PID,
 * and use PID1 to keep the shadow of guest PID 0,
 * so that guest tlbes with TID=0 can be accessed at any time. */
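/* (On e500, a translation matches a TLB entry whose TID equals PID0 or
 * PID1, or whose TID is 0.  Example: with guest PID 5, shadow_pid holds
 * the shadow ID for (AS, 5, PR) and shadow_pid1 the one for (AS, 0, PR),
 * so both the guest's private and its global mappings keep working
 * without re-faulting.) */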
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
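	/*
	 * Both calls pass avoid_recursion=1: kvmppc_e500_get_sid() would
	 * otherwise call back into this function whenever it changes the
	 * mapping tables, recursing endlessly.
	 */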
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu,AS,ID,PR) to a physical core shadow id.
 * This function first checks whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}
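
		/*
		 * If the core just ran out of shadow IDs, the local TLB was
		 * flushed and every local mapping forgotten above; the loop
		 * then retries and is guaranteed a fresh ID on the next pass.
		 */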
		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}

unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe)
{
	return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts;
	int pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs (one per PR value) */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case we do a local invalidation of the
		 * specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * we invalidate the entire shadow PID.
		 */
		pid = local_sid_lookup(&idt->id[ts][tid][pr]);
		if (pid <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a 4K entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search the host TLB to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

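		/*
		 * The MAS registers are shared per-CPU scratch state; keep
		 * interrupts off so nothing (e.g. a TLB miss handler) can
		 * clobber them between the tlbsx search and the tlbwe
		 * write-back.
		 */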
		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kvmppc_e500_id_table_reset_all(vcpu_e500);
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_booke_vcpu_load(vcpu, cpu);

	/* Shadow PID may be expired on local core */
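	/*
	 * (If this vcpu last ran elsewhere, its id-table entries point into
	 * another core's pcpu_sids, so lookups miss and fresh shadow IDs
	 * are allocated lazily here.)
	 */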
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
	if (vcpu->arch.shadow_msr & MSR_SPE)
		kvmppc_vcpu_disable_spe(vcpu);
#endif

	kvmppc_booke_vcpu_put(vcpu);
}

int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}

static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_book3e_206_tlb_entry *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = get_entry(vcpu_e500, 1, 0);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

	/* 4K map for serial output. Used by kernel wrapper. */
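	/* (0xe0004500 below is presumably the NS16550 UART at CCSR offset
	 *  0x4500 on the emulated mpc8544ds-style platform; only that one
	 *  4K page is identity-mapped, cache-inhibited and guarded.) */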
	tlbe = get_entry(vcpu_e500, 1, 1);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_e500_tlb_setup(vcpu_e500);

	/* Registers init */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500V2;

	return 0;
}

static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
			       KVM_SREGS_E_PM;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

	kvmppc_get_sregs_ivor(vcpu, sregs);
	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
	return 0;
}

static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
			sregs->u.e.ivor_high[0];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
			sregs->u.e.ivor_high[1];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
			sregs->u.e.ivor_high[2];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}

static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
						     unsigned int id)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu_e500) {
		err = -ENOMEM;
		goto out;
	}

	vcpu = &vcpu_e500->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto uninit_vcpu;

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		goto uninit_id;

	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared)
		goto uninit_tlb;

	return vcpu;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
	kvmppc_e500_id_table_free(vcpu_e500);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvmppc_e500_id_table_free(vcpu_e500);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
	return 0;
}

static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}

static struct kvmppc_ops kvm_ops_e500 = {
	.get_sregs = kvmppc_core_get_sregs_e500,
	.set_sregs = kvmppc_core_set_sregs_e500,
	.get_one_reg = kvmppc_get_one_reg_e500,
	.set_one_reg = kvmppc_set_one_reg_e500,
	.vcpu_load   = kvmppc_core_vcpu_load_e500,
	.vcpu_put    = kvmppc_core_vcpu_put_e500,
	.vcpu_create = kvmppc_core_vcpu_create_e500,
	.vcpu_free   = kvmppc_core_vcpu_free_e500,
	.mmu_destroy = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500,
	.destroy_vm = kvmppc_core_destroy_vm_e500,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

static int __init kvmppc_e500_init(void)
{
	int r, i;
	unsigned long ivor[3];
	/* Process remaining handlers above the generic first 16 */
	unsigned long *handler = &kvmppc_booke_handler_addr[16];
	unsigned long handler_len;
	unsigned long max_ivor = 0;

	r = kvmppc_core_check_processor_compat();
	if (r)
		goto err_out;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	/* copy extra E500 exception handlers */
	ivor[0] = mfspr(SPRN_IVOR32);
	ivor[1] = mfspr(SPRN_IVOR33);
	ivor[2] = mfspr(SPRN_IVOR34);
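	/*
	 * IVOR32..34 are the e500 SPE exceptions (unavailable, FP data,
	 * FP round); their KVM handlers sit past the 16 generic BookE
	 * ones, which is why handler points at &kvmppc_booke_handler_addr[16].
	 */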
	for (i = 0; i < 3; i++) {
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}
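	/*
	 * The handlers were copied as data; flush the icache over the whole
	 * copied range (up to the highest IVOR) so instruction fetch sees
	 * the new code.
	 */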
	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_e500.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_e500;

err_out:
	return r;
}

static void __exit kvmppc_e500_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}

module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");