// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace_booke.h"
#include "timing.h"
#include "e500_mmu_host.h"

static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}

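/*
 * Worked example for tlb0_set_base() (illustrative numbers, not a claim
 * about the configured geometry): with 4K pages, 128 sets and 2 ways,
 * addr = 0x10003000 gives (0x10003000 >> 12) & 127 = 3, times 2 ways,
 * so set_base = 6 and the candidate entries for that address are
 * gtlb_arch[gtlb_offset[0] + 6] and gtlb_arch[gtlb_offset[0] + 7].
 */
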
static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}

static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}

	return esel;
}

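/*
 * Note on the split above: for the set-associative TLB0 the guest's
 * MAS0[ESEL] names only a way, so the flat index into the TLB0 array is
 * way + set_base(MAS2[EPN]); for the fully associative TLB1, ESEL is
 * already the entry number.
 */
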
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		if (eaddr < vcpu_e500->tlb1_min_eaddr ||
				eaddr > vcpu_e500->tlb1_max_eaddr)
			return -1;
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}

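/*
 * Sketch of the lookup above: TLB0 scans only the 'ways' entries of one
 * set (contiguous at offset + set_base), while TLB1 scans the whole
 * array, gated by the cheap tlb1_min/max_eaddr range test. Continuing
 * the illustrative 128-set, 2-way numbers: a hit in set 3, way 1
 * returns set_base + i = 6 + 1 = 7, which callers hand to get_entry()
 * as esel.
 */
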
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		gva_t eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(get_tlbmiss_tid(vcpu))
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}

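/*
 * The preload above mirrors hardware MAS-default behaviour: tlbsel is
 * taken from bit 28 of MAS4 (the TLBSELD default; only the low bit
 * matters with two TLBs) and tsized from bits 11:7 (TSIZED), so the
 * guest's miss handler can adjust MAS1/MAS2/MAS7_3 and issue tlbwe
 * without having to program MAS0 itself.
 */
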
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int size = vcpu_e500->gtlb_params[1].entries;
	unsigned int offset;
	gva_t eaddr;
	int i;

	vcpu_e500->tlb1_min_eaddr = ~0UL;
	vcpu_e500->tlb1_max_eaddr = 0;
	offset = vcpu_e500->gtlb_offset[1];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + i];

		if (!get_tlb_v(tlbe))
			continue;

		eaddr = get_tlb_eaddr(tlbe);
		vcpu_e500->tlb1_min_eaddr =
				min(vcpu_e500->tlb1_min_eaddr, eaddr);

		eaddr = get_tlb_end(tlbe);
		vcpu_e500->tlb1_max_eaddr =
				max(vcpu_e500->tlb1_max_eaddr, eaddr);
	}
}

static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
		struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	return vcpu_e500->tlb1_min_eaddr == start ||
	       vcpu_e500->tlb1_max_eaddr == end;
}

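/*
 * Example of the arithmetic above: for a 1 MiB TLB1 entry with
 * eaddr = 0x12345000, size = 0x100000, so start = eaddr & ~(size - 1) =
 * 0x12300000 and end = 0x123fffff; a recalculation is only needed when
 * the entry being dropped pins one end of the cached [min, max] range.
 */
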
/* This function is supposed to be called for adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
		struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned long start, end, size;
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (!get_tlb_v(gtlbe))
		return;

	size = get_tlb_bytes(gtlbe);
	start = get_tlb_eaddr(gtlbe) & ~(size - 1);
	end = start + size - 1;

	vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
	vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
		kvmppc_recalc_tlb1map_range(vcpu_e500);

	gtlbe->mas1 = 0;

	return 0;
}

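/*
 * Entries marked IPROT survive these invalidations (the early -1 return
 * above); invalidation itself is just clearing MAS1, which drops
 * MAS1_VALID.
 */
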
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all host shadow mappings */
	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

	return EMULATE_DONE;
}

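/*
 * EA decoding used above: bit 2 of the tlbivax effective address
 * requests "invalidate all" (ia) and bit 3 selects the TLB array;
 * otherwise the EA is truncated to a 4K page address (ea &= 0xfffff000)
 * and only a matching entry, if one exists, is dropped.
 */
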
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
		       int pid, int type)
{
	struct kvm_book3e_206_tlb_entry *tlbe;
	int tid, esel;

	/* invalidate all entries */
	for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
		tlbe = get_entry(vcpu_e500, tlbsel, esel);
		tid = get_tlb_tid(tlbe);
		if (type == 0 || tid == pid) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
		}
	}
}

static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
		       gva_t ea)
{
	int tlbsel, esel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
		if (esel >= 0) {
			inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
			break;
		}
	}
}

int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int pid = get_cur_spid(vcpu);

	if (type == 0 || type == 1) {
		tlbilx_all(vcpu_e500, 0, pid, type);
		tlbilx_all(vcpu_e500, 1, pid, type);
	} else if (type == 3) {
		tlbilx_one(vcpu_e500, pid, ea);
	}

	return EMULATE_DONE;
}

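/*
 * tlbilx type handling as implemented above: type 0 invalidates every
 * non-IPROT entry in both TLBs, type 1 only those whose TID matches the
 * current SPID, and type 3 the single entry (TLB0 searched before TLB1)
 * matching both the EA and the SPID. Other types fall through as no-ops.
 */
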
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	int tlbsel, esel;
	int recal = 0;
	int idx;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe)) {
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
		if ((tlbsel == 1) &&
			kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
			recal = 1;
	}

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	if (!(vcpu->arch.shared->msr & MSR_CM))
		gtlbe->mas2 &= 0xffffffffUL;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
				      gtlbe->mas2, gtlbe->mas7_3);

	if (tlbsel == 1) {
		/*
		 * If a valid tlb1 entry is overwritten then recalculate the
		 * min/max TLB1 map address range otherwise no need to look
		 * in tlb1 array.
		 */
		if (recal)
			kvmppc_recalc_tlb1map_range(vcpu_e500);
		else
			kvmppc_set_tlb1map_range(vcpu, gtlbe);
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		u64 eaddr = get_tlb_eaddr(gtlbe);
		u64 raddr = get_tlb_raddr(gtlbe);

		if (tlbsel == 0) {
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
		}

		/* Premap the faulting page */
		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

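/*
 * The premap above is an optimisation: when the freshly written entry
 * is considered host-safe, the shadow (host) TLB is populated right
 * away via kvmppc_mmu_map() -- with TLB0 entries first clamped to 4K --
 * rather than waiting for the guest's first access to fault.
 */
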
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				  gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
			       struct kvm_translation *tr)
{
	int index;
	gva_t eaddr;
	u8 pid;
	u8 as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
	if (index < 0) {
		tr->valid = 0;
		return 0;
	}

	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
	/* XXX what does "writeable" and "usermode" even mean? */
	tr->valid = 1;

	return 0;
}

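/*
 * Packing sketch for the encoding above (illustrative): to translate
 * EA 0x10002000 in address space 1 with PID 5, userspace would set
 *
 *	tr->linear_address = (1ULL << 40) | (5ULL << 32) | 0x10002000;
 *
 * and read tr->physical_address back once tr->valid is set.
 */
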
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
		       gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

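/*
 * E.g. for a 4K entry pgmask = 0xfff, so the guest physical address is
 * the entry's RPN with the low 12 bits of the effective address
 * appended; a 1 MiB entry passes the low 20 bits through instead.
 */
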
/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
	kfree(vcpu_e500->g2h_tlb1_map);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;

		kfree(vcpu_e500->shared_tlb_pages);
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
	sregs->u.e.mas6 = vcpu->arch.shared->mas6;

	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
	sregs->u.e.tlbcfg[2] = 0;
	sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
	}

	return 0;
}

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_MAS0:
		*val = get_reg_val(id, vcpu->arch.shared->mas0);
		break;
	case KVM_REG_PPC_MAS1:
		*val = get_reg_val(id, vcpu->arch.shared->mas1);
		break;
	case KVM_REG_PPC_MAS2:
		*val = get_reg_val(id, vcpu->arch.shared->mas2);
		break;
	case KVM_REG_PPC_MAS7_3:
		*val = get_reg_val(id, vcpu->arch.shared->mas7_3);
		break;
	case KVM_REG_PPC_MAS4:
		*val = get_reg_val(id, vcpu->arch.shared->mas4);
		break;
	case KVM_REG_PPC_MAS6:
		*val = get_reg_val(id, vcpu->arch.shared->mas6);
		break;
	case KVM_REG_PPC_MMUCFG:
		*val = get_reg_val(id, vcpu->arch.mmucfg);
		break;
	case KVM_REG_PPC_EPTCFG:
		*val = get_reg_val(id, vcpu->arch.eptcfg);
		break;
	case KVM_REG_PPC_TLB0CFG:
	case KVM_REG_PPC_TLB1CFG:
	case KVM_REG_PPC_TLB2CFG:
	case KVM_REG_PPC_TLB3CFG:
		i = id - KVM_REG_PPC_TLB0CFG;
		*val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
		break;
	case KVM_REG_PPC_TLB0PS:
	case KVM_REG_PPC_TLB1PS:
	case KVM_REG_PPC_TLB2PS:
	case KVM_REG_PPC_TLB3PS:
		i = id - KVM_REG_PPC_TLB0PS;
		*val = get_reg_val(id, vcpu->arch.tlbps[i]);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_MAS0:
		vcpu->arch.shared->mas0 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS1:
		vcpu->arch.shared->mas1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS2:
		vcpu->arch.shared->mas2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS7_3:
		vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS4:
		vcpu->arch.shared->mas4 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MAS6:
		vcpu->arch.shared->mas6 = set_reg_val(id, *val);
		break;
	/* Only allow MMU registers to be set to the config supported by KVM */
	case KVM_REG_PPC_MMUCFG: {
		u32 reg = set_reg_val(id, *val);
		if (reg != vcpu->arch.mmucfg)
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_EPTCFG: {
		u32 reg = set_reg_val(id, *val);
		if (reg != vcpu->arch.eptcfg)
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_TLB0CFG:
	case KVM_REG_PPC_TLB1CFG:
	case KVM_REG_PPC_TLB2CFG:
	case KVM_REG_PPC_TLB3CFG: {
		/* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */
		u32 reg = set_reg_val(id, *val);
		i = id - KVM_REG_PPC_TLB0CFG;
		if (reg != vcpu->arch.tlbcfg[i])
			r = -EINVAL;
		break;
	}
	case KVM_REG_PPC_TLB0PS:
	case KVM_REG_PPC_TLB1PS:
	case KVM_REG_PPC_TLB2PS:
	case KVM_REG_PPC_TLB3PS: {
		u32 reg = set_reg_val(id, *val);
		i = id - KVM_REG_PPC_TLB0PS;
		if (reg != vcpu->arch.tlbps[i])
			r = -EINVAL;
		break;
	}
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
		struct kvm_book3e_206_tlb_params *params)
{
	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params->tlb_sizes[0] <= 2048)
		vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
	vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
	vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

	return 0;
}

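/*
 * Shape of the TLBnCFG values built above: the entry count is OR'd into
 * the low TLBnCFG_N_ENTRY bits and the associativity into the field at
 * TLBnCFG_ASSOC_SHIFT, e.g. a 512-entry, 4-way TLB0 reports
 * (4 << TLBnCFG_ASSOC_SHIFT) | 512 on top of the preserved host bits.
 */
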
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_params params;
	char *virt;
	struct page **pages;
	struct tlbe_priv *privs[2] = {};
	u64 *g2h_bitmap;
	size_t array_len;
	u32 sets;
	int num_pages, ret, i;

	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
			   sizeof(params)))
		return -EFAULT;

	if (params.tlb_sizes[1] > 64)
		return -EINVAL;
	if (params.tlb_ways[1] != params.tlb_sizes[1])
		return -EINVAL;
	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
		return -EINVAL;
	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
		return -EINVAL;

	if (!is_power_of_2(params.tlb_ways[0]))
		return -EINVAL;

	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
	if (!is_power_of_2(sets))
		return -EINVAL;

	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

	if (cfg->array_len < array_len)
		return -EINVAL;

	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
		    cfg->array / PAGE_SIZE;
	pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
	if (ret < 0)
		goto free_pages;

	if (ret != num_pages) {
		num_pages = ret;
		ret = -EFAULT;
		goto put_pages;
	}

	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!virt) {
		ret = -ENOMEM;
		goto put_pages;
	}

	privs[0] = kcalloc(params.tlb_sizes[0], sizeof(*privs[0]), GFP_KERNEL);
	if (!privs[0]) {
		ret = -ENOMEM;
		goto put_pages;
	}

	privs[1] = kcalloc(params.tlb_sizes[1], sizeof(*privs[1]), GFP_KERNEL);
	if (!privs[1]) {
		ret = -ENOMEM;
		goto free_privs_first;
	}

	g2h_bitmap = kcalloc(params.tlb_sizes[1],
			     sizeof(*g2h_bitmap),
			     GFP_KERNEL);
	if (!g2h_bitmap) {
		ret = -ENOMEM;
		goto free_privs_second;
	}

	free_gtlb(vcpu_e500);

	vcpu_e500->gtlb_priv[0] = privs[0];
	vcpu_e500->gtlb_priv[1] = privs[1];
	vcpu_e500->g2h_tlb1_map = g2h_bitmap;

	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
		(virt + (cfg->array & (PAGE_SIZE - 1)));

	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

	/* Update vcpu's MMU geometry based on SW_TLB input */
	vcpu_mmu_geometry_update(vcpu, &params);

	vcpu_e500->shared_tlb_pages = pages;
	vcpu_e500->num_shared_tlb_pages = num_pages;

	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
	vcpu_e500->gtlb_params[0].sets = sets;

	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
	vcpu_e500->gtlb_params[1].sets = 1;

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

free_privs_second:
	kfree(privs[1]);
free_privs_first:
	kfree(privs[0]);
put_pages:
	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
free_pages:
	kfree(pages);
	return ret;
}

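/*
 * Usage sketch (illustrative only; see the KVM_CAP_SW_TLB description in
 * the KVM API documentation). Userspace shares the TLB array with the
 * kernel roughly like this, where 'shared_array' is a buffer it keeps
 * mapped for the lifetime of the vcpu:
 *
 *	struct kvm_book3e_206_tlb_params p = {
 *		.tlb_sizes = { 512, 64, 0, 0 },
 *		.tlb_ways  = { 4, 64, 0, 0 },
 *	};
 *	struct kvm_config_tlb cfg = {
 *		.params    = (uintptr_t)&p,
 *		.array     = (uintptr_t)shared_array,
 *		.array_len = (512 + 64) *
 *			     sizeof(struct kvm_book3e_206_tlb_entry),
 *		.mmu_type  = KVM_MMU_FSL_BOOKE_NOHV,
 *	};
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_SW_TLB,
 *		.args = { (uintptr_t)&cfg },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * TLB1 must be fully associative here (ways == size), hence the 64/64.
 */
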
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *dirty)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_recalc_tlb1map_range(vcpu_e500);
	kvmppc_core_flush_tlb(vcpu);
	return 0;
}

/* Vcpu's MMU default configuration */
static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
		       struct kvmppc_e500_tlb_params *params)
{
	/* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values */
	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

	/* Initialize TLBnCFG fields with host values and SW_TLB geometry */
	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[0] |= params[0].entries;
	vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params[1].entries;
	vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;

	if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
		vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
		vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);

		vcpu->arch.mmucfg &= ~MMUCFG_LRAT;

		/* Guest mmu emulation currently doesn't handle E.PT */
		vcpu->arch.eptcfg = 0;
		vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
		vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
	}

	return 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;

	if (e500_mmu_host_init(vcpu_e500))
		return -EINVAL;

	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
	vcpu_e500->gtlb_params[0].sets =
		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_params[1].sets = 1;

	vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE +
					     KVM_E500_TLB1_SIZE,
					     sizeof(*vcpu_e500->gtlb_arch),
					     GFP_KERNEL);
	if (!vcpu_e500->gtlb_arch)
		return -ENOMEM;

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

	vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries,
					  sizeof(struct tlbe_ref),
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries,
					  sizeof(struct tlbe_ref),
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries,
					  sizeof(*vcpu_e500->g2h_tlb1_map),
					  GFP_KERNEL);
	if (!vcpu_e500->g2h_tlb1_map)
		goto err;

	vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);

	kvmppc_recalc_tlb1map_range(vcpu_e500);
	return 0;

err:
	free_gtlb(vcpu_e500);
	return -ENOMEM;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	free_gtlb(vcpu_e500);
	e500_mmu_host_uninit(vcpu_e500);
}