/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace_booke.h"
#include "timing.h"
#include "e500_mmu_host.h"
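
/*
 * The guest-visible TLBs (TLB0 and TLB1) are emulated entirely in software.
 * gtlb_nv[0] is a per-vcpu counter that picks the next victim way in TLB0
 * in round-robin order when a replacement entry must be suggested to the
 * guest on a miss.
 */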
static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}
static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}

	return esel;
}
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		if (eaddr < vcpu_e500->tlb1_min_eaddr ||
		    eaddr > vcpu_e500->tlb1_max_eaddr)
			return -1;
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;
		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;
		if (!get_tlb_v(tlbe))
			continue;
		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}
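
/*
 * Load the shared MAS registers with the state a real TLB miss would leave
 * behind, so the guest's miss handler can simply tlbwe a replacement entry.
 */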
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		gva_t eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(get_tlbmiss_tid(vcpu))
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}
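
/*
 * Cache the lowest and highest guest effective addresses covered by valid
 * TLB1 entries; kvmppc_e500_tlb_index() uses this range to skip a full TLB1
 * scan when the faulting address cannot possibly hit.
 */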
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int size = vcpu_e500->gtlb_params[1].entries;
	unsigned int offset;
	gva_t eaddr;
	int i;

	vcpu_e500->tlb1_min_eaddr = ~0UL;
	vcpu_e500->tlb1_max_eaddr = 0;
	offset = vcpu_e500->gtlb_offset[1];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + i];

		if (!get_tlb_v(tlbe))
			continue;

		eaddr = get_tlb_eaddr(tlbe);
		vcpu_e500->tlb1_min_eaddr =
			min(vcpu_e500->tlb1_min_eaddr, eaddr);

		eaddr = get_tlb_end(tlbe);
		vcpu_e500->tlb1_max_eaddr =
			max(vcpu_e500->tlb1_max_eaddr, eaddr);
	}
}
static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
		struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	return vcpu_e500->tlb1_min_eaddr == start ||
	       vcpu_e500->tlb1_max_eaddr == end;
}
/* This function is supposed to be called for adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
		struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (!get_tlb_v(gtlbe))
		return;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
	vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}
static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
		kvmppc_recalc_tlb1map_range(vcpu_e500);

	gtlbe->mas1 = 0;

	return 0;
}
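
/*
 * Emulate a guest mtspr to MMUCSR0: flash-invalidate guest TLB0 and/or TLB1
 * as requested, then drop every host shadow mapping so stale translations
 * cannot outlive the guest-visible invalidation.
 */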
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}
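
/*
 * Emulate tlbivax: invalidate either every entry in the selected guest TLB
 * or the single entry matching the effective address, then flush the host
 * shadow TLB.
 */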
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
		       int pid, int type)
{
	struct kvm_book3e_206_tlb_entry *tlbe;
	int tid, esel;

	/* invalidate all entries */
	for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
		tlbe = get_entry(vcpu_e500, tlbsel, esel);
		tid = get_tlb_tid(tlbe);
		if (type == 0 || tid == pid) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
		}
	}
}
static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
		       gva_t ea)
{
	int tlbsel, esel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
		if (esel >= 0) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
			break;
		}
	}
}
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int pid = get_cur_spid(vcpu);

	if (type == 0 || type == 1) {
		tlbilx_all(vcpu_e500, 0, pid, type);
		tlbilx_all(vcpu_e500, 1, pid, type);
	} else if (type == 3) {
		tlbilx_one(vcpu_e500, pid, ea);
	}

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}
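
/*
 * Emulate tlbwe: copy the MAS register contents into the selected guest TLB
 * entry, keep the cached TLB1 address range up to date, and pre-map the page
 * on the host side when the new entry is safe to shadow directly.
 */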
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	int tlbsel, esel;
	int recal = 0;
	int idx;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe)) {
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
		if ((tlbsel == 1) &&
		    kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
			recal = 1;
	}

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	if (!(vcpu->arch.shared->msr & MSR_CM))
		gtlbe->mas2 &= 0xffffffffUL;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
				      gtlbe->mas2, gtlbe->mas7_3);

	if (tlbsel == 1) {
		/*
		 * If a valid tlb1 entry is overwritten then recalculate the
		 * min/max TLB1 map address range otherwise no need to look
		 * in other valid tlb1 entries.
		 */
		if (recal)
			kvmppc_recalc_tlb1map_range(vcpu_e500);
		else
			kvmppc_set_tlb1map_range(vcpu, gtlbe);
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		u64 eaddr = get_tlb_eaddr(gtlbe);
		u64 raddr = get_tlb_raddr(gtlbe);

		if (tlbsel == 0) {
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
		}

		/* Premap the faulting page */
		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}
/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
			       struct kvm_translation *tr)
{
	int index;
	gva_t eaddr;
	u8 pid;
	unsigned int as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
	if (index < 0) {
		tr->valid = 0;
		return 0;
	}

	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
	/* XXX what does "writeable" and "usermode" even mean? */
	tr->valid = 1;

	return 0;
}
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}
int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}
void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
{
}

/*****************************************/
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
	kfree(vcpu_e500->g2h_tlb1_map);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;

		kfree(vcpu_e500->shared_tlb_pages);
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}
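
/*
 * The SREGS and ONE_REG accessors below mirror the shared MAS registers and
 * the cached MMU configuration registers between KVM and userspace.
 */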
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
	sregs->u.e.mas6 = vcpu->arch.shared->mas6;

	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
	sregs->u.e.tlbcfg[2] = 0;
	sregs->u.e.tlbcfg[3] = 0;
}
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
	}

	return 0;
}
int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_MAS0:
		*val = get_reg_val(id, vcpu->arch.shared->mas0);
		break;
	case KVM_REG_PPC_MAS1:
		*val = get_reg_val(id, vcpu->arch.shared->mas1);
		break;
	case KVM_REG_PPC_MAS2:
		*val = get_reg_val(id, vcpu->arch.shared->mas2);
		break;
	case KVM_REG_PPC_MAS7_3:
		*val = get_reg_val(id, vcpu->arch.shared->mas7_3);
		break;
	case KVM_REG_PPC_MAS4:
		*val = get_reg_val(id, vcpu->arch.shared->mas4);
		break;
	case KVM_REG_PPC_MAS6:
		*val = get_reg_val(id, vcpu->arch.shared->mas6);
		break;
	case KVM_REG_PPC_MMUCFG:
		*val = get_reg_val(id, vcpu->arch.mmucfg);
		break;
	case KVM_REG_PPC_EPTCFG:
		*val = get_reg_val(id, vcpu->arch.eptcfg);
		break;
	case KVM_REG_PPC_TLB0CFG:
	case KVM_REG_PPC_TLB1CFG:
	case KVM_REG_PPC_TLB2CFG:
	case KVM_REG_PPC_TLB3CFG:
		i = id - KVM_REG_PPC_TLB0CFG;
		*val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
		break;
	case KVM_REG_PPC_TLB0PS:
	case KVM_REG_PPC_TLB1PS:
	case KVM_REG_PPC_TLB2PS:
	case KVM_REG_PPC_TLB3PS:
		i = id - KVM_REG_PPC_TLB0PS;
		*val = get_reg_val(id, vcpu->arch.tlbps[i]);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_MAS0:
		vcpu->arch.shared->mas0 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS1:
		vcpu->arch.shared->mas1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS2:
		vcpu->arch.shared->mas2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS7_3:
		vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS4:
		vcpu->arch.shared->mas4 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS6:
		vcpu->arch.shared->mas6 = set_reg_val(id, *val);
		break;
	/* Only allow MMU registers to be set to the config supported by KVM */
	case KVM_REG_PPC_MMUCFG: {
		u32 reg = set_reg_val(id, *val);
		if (reg != vcpu->arch.mmucfg)
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_EPTCFG: {
		u32 reg = set_reg_val(id, *val);
		if (reg != vcpu->arch.eptcfg)
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_TLB0CFG:
	case KVM_REG_PPC_TLB1CFG:
	case KVM_REG_PPC_TLB2CFG:
	case KVM_REG_PPC_TLB3CFG: {
		/* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */
		u32 reg = set_reg_val(id, *val);
		i = id - KVM_REG_PPC_TLB0CFG;
		if (reg != vcpu->arch.tlbcfg[i])
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_TLB0PS:
	case KVM_REG_PPC_TLB1PS:
	case KVM_REG_PPC_TLB2PS:
	case KVM_REG_PPC_TLB3PS: {
		u32 reg = set_reg_val(id, *val);
		i = id - KVM_REG_PPC_TLB0PS;
		if (reg != vcpu->arch.tlbps[i])
			r = -EINVAL;
		break;
	}
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
		struct kvm_book3e_206_tlb_params *params)
{
	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params->tlb_sizes[0] <= 2048)
		vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
	vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
	vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

	return 0;
}
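
/*
 * KVM_CONFIG_TLB: the guest TLB array is supplied by userspace and shared
 * with the kernel; the user pages backing it are pinned and vmapped so both
 * sides can access the entries without copying.
 */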
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_params params;
	char *virt;
	struct page **pages;
	struct tlbe_priv *privs[2] = {};
	u64 *g2h_bitmap = NULL;
	size_t array_len;
	u32 sets;
	int num_pages, ret, i;

	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
			   sizeof(params)))
		return -EFAULT;

	if (params.tlb_sizes[1] > 64)
		return -EINVAL;
	if (params.tlb_ways[1] != params.tlb_sizes[1])
		return -EINVAL;
	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
		return -EINVAL;
	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
		return -EINVAL;

	if (!is_power_of_2(params.tlb_ways[0]))
		return -EINVAL;

	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
	if (!is_power_of_2(sets))
		return -EINVAL;

	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

	if (cfg->array_len < array_len)
		return -EINVAL;

	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
		    cfg->array / PAGE_SIZE;
	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
	if (ret < 0)
		goto err_pages;

	if (ret != num_pages) {
		num_pages = ret;
		ret = -EFAULT;
		goto err_put_page;
	}

	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!virt) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
			   GFP_KERNEL);
	privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
			   GFP_KERNEL);

	if (!privs[0] || !privs[1]) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
			     GFP_KERNEL);
	if (!g2h_bitmap) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	free_gtlb(vcpu_e500);

	vcpu_e500->gtlb_priv[0] = privs[0];
	vcpu_e500->gtlb_priv[1] = privs[1];
	vcpu_e500->g2h_tlb1_map = g2h_bitmap;

	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
		(virt + (cfg->array & (PAGE_SIZE - 1)));

	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

	/* Update vcpu's MMU geometry based on SW_TLB input */
	vcpu_mmu_geometry_update(vcpu, &params);

	vcpu_e500->shared_tlb_pages = pages;
	vcpu_e500->num_shared_tlb_pages = num_pages;

	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
	vcpu_e500->gtlb_params[0].sets = sets;

	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
	vcpu_e500->gtlb_params[1].sets = 1;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err_put_page:
	kfree(privs[0]);
	kfree(privs[1]);
	kfree(g2h_bitmap);

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);

err_pages:
	kfree(pages);
	return ret;
}
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *dirty)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_recalc_tlb1map_range(vcpu_e500);
	kvmppc_core_flush_tlb(vcpu);
	return 0;
}
/* Vcpu's MMU default configuration */
static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
		       struct kvmppc_e500_tlb_params *params)
{
	/* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values */
	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

	/* Initialize TLBnCFG fields with host values and SW_TLB geometry */
	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[0] |= params[0].entries;
	vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params[1].entries;
	vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;

	if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
		vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
		vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);

		vcpu->arch.mmucfg &= ~MMUCFG_LRAT;

		/* Guest mmu emulation currently doesn't handle E.PT */
		vcpu->arch.eptcfg = 0;
		vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
		vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
	}

	return 0;
}
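
/*
 * Set up the default guest TLB geometry; it stays in effect unless userspace
 * reconfigures it through KVM_CONFIG_TLB above.
 */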
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

	if (e500_mmu_host_init(vcpu_e500))
		goto err;

	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
	vcpu_e500->gtlb_params[0].sets =
		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_params[1].sets = 1;

	vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
	if (!vcpu_e500->gtlb_arch)
		return -ENOMEM;

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[0].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->g2h_tlb1_map)
		goto err;

	vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err:
	free_gtlb(vcpu_e500);
	return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	free_gtlb(vcpu_e500);
	e500_mmu_host_uninit(vcpu_e500);
}