/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned int victim;

        victim = vcpu_e500->gtlb_nv[0]++;
        if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
                vcpu_e500->gtlb_nv[0] = 0;

        return victim;
}
static inline unsigned int tlb1_max_shadow_size(void)
{
        /* reserve one entry for magic page */
        return host_tlb_params[1].entries - tlbcam_index - 1;
}
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
        return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
                 * supervisor permissions into user permissions. */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
        mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
        return mas3;
}
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
        return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
        return mas2 & MAS2_ATTRIB_MASK;
#endif
}
/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_MAS8, stlbe->mas8);
#endif
        asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
        /* Must clear mas8 for other host tlbwe's */
        mtspr(SPRN_MAS8, 0);
        isync();
#endif
        local_irq_restore(flags);

        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                      stlbe->mas2, stlbe->mas7_3);
}
/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
        unsigned long flags;
        u32 mas0;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, 0);
        asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
        mas0 = mfspr(SPRN_MAS0);
        local_irq_restore(flags);

        return mas0;
}
/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
        u32 mas0;

        if (tlbsel == 0) {
                mas0 = get_host_mas0(stlbe->mas2);
                __write_host_tlbe(stlbe, mas0);
        } else {
                __write_host_tlbe(stlbe,
                                  MAS0_TLBSEL(1) |
                                  MAS0_ESEL(to_htlb1_esel(sesel)));
        }
}
#ifdef CONFIG_KVM_E500V2
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        unsigned int stid;
        pfn_t pfn;

        pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        preempt_disable();
        stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

        magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas8 = 0;

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
        preempt_enable();
}
#endif
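/*
 * Invalidate the host shadow entries that back the given guest TLB entry.
 * A guest TLB1 entry may be backed by several host TLB1 entries, tracked
 * in the g2h bitmap; each of those is written invalid directly.
 */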
static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
                                int tlbsel, int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);

        if (tlbsel == 1 &&
            vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
                int hw_tlb_indx;
                unsigned long flags;

                local_irq_save(flags);
                while (tmp) {
                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
                        mtspr(SPRN_MAS0,
                              MAS0_TLBSEL(1) |
                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
                        mtspr(SPRN_MAS1, 0);
                        asm volatile("tlbwe");
                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
                        tmp &= tmp - 1;
                }
                mb();
                vcpu_e500->g2h_tlb1_map[esel] = 0;
                vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
                local_irq_restore(flags);
                return;
        }

        /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
        kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
}
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
        int set_base;

        set_base = (addr >> PAGE_SHIFT) & (sets - 1);
        set_base *= ways;

        return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
        return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
                             vcpu_e500->gtlb_params[0].ways);
}
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel = get_tlb_esel_bit(vcpu);

        if (tlbsel == 0) {
                esel &= vcpu_e500->gtlb_params[0].ways - 1;
                esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
        } else {
                esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
        }

        return esel;
}
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
        int size = vcpu_e500->gtlb_params[tlbsel].entries;
        unsigned int set_base, offset;
        int i;

        if (tlbsel == 0) {
                set_base = gtlb0_set_base(vcpu_e500, eaddr);
                size = vcpu_e500->gtlb_params[0].ways;
        } else {
                if (eaddr < vcpu_e500->tlb1_min_eaddr ||
                    eaddr > vcpu_e500->tlb1_max_eaddr)
                        return -1;
                set_base = 0;
        }

        offset = vcpu_e500->gtlb_offset[tlbsel];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + set_base + i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as && as != -1)
                        continue;

                return set_base + i;
        }

        return -1;
}
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
                                         pfn_t pfn)
{
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;

        if (tlbe_is_writable(gtlbe))
                kvm_set_pfn_dirty(pfn);
}
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
        if (ref->flags & E500_TLB_VALID) {
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
}
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        if (vcpu_e500->g2h_tlb1_map)
                memset(vcpu_e500->g2h_tlb1_map, 0,
                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
                memset(vcpu_e500->h2g_tlb1_rmap, 0,
                       sizeof(unsigned int) * host_tlb_params[1].entries);
}
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int tlbsel, i;

        for (tlbsel = 0; tlbsel <= 1; tlbsel++)
                for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
                        struct tlbe_ref *ref =
                                &vcpu_e500->gtlb_priv[tlbsel][i].ref;
                        kvmppc_e500_ref_release(ref);
                }
}
static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int i, stlbsel;

        kvmppc_e500_tlbil_all(vcpu_e500);

        for (stlbsel = 0; stlbsel <= 1; stlbsel++)
                for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
                        struct tlbe_ref *ref =
                                &vcpu_e500->tlb_refs[stlbsel][i];
                        kvmppc_e500_ref_release(ref);
                }

        clear_tlb_privs(vcpu_e500);
}
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        clear_tlb_refs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
}
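/*
 * Load the shared MAS registers with the values the guest expects to see
 * after a TLB miss, so that its TLB miss handler can simply tlbwe.
 */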
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int victim, tsized;
        int tlbsel;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
        tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

        vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(get_tlbmiss_tid(vcpu))
                | MAS1_TSIZE(tsized);
        vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
                | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
        vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
        vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
}
/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(
        struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
{
        pfn_t pfn = ref->pfn;
        u32 pr = vcpu->arch.shared->msr & MSR_PR;

        BUG_ON(!(ref->flags & E500_TLB_VALID));

        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN) |
                      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
        stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}
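/*
 * Translate a guest mapping into a host shadow TLB entry: resolve the gfn
 * to a host pfn, pick the largest page size both sides can use (for
 * VM_PFNMAP and hugetlb backed TLB1 mappings), and fill in *stlbe.
 */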
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
        int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
        struct tlbe_ref *ref)
{
        struct kvm_memory_slot *slot;
        unsigned long pfn = 0; /* silence GCC warning */
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far.  Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        if (tlbsel == 1) {
                struct vm_area_struct *vma;
                down_read(&current->mm->mmap_sem);

                vma = find_vma(current->mm, hva);
                if (vma && hva >= vma->vm_start &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * This VMA is a physically contiguous region (e.g.
                         * /dev/mem) that bypasses normal Linux page
                         * management.  Find the overlap between the
                         * vma and the memslot.
                         */

                        unsigned long start, end;
                        unsigned long slot_start, slot_end;

                        pfnmap = 1;

                        start = vma->vm_pgoff;
                        end = start +
                              ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

                        pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

                        slot_start = pfn - (gfn - slot->base_gfn);
                        slot_end = slot_start + slot->npages;

                        if (start < slot_start)
                                start = slot_start;
                        if (end > slot_end)
                                end = slot_end;

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                        /*
                         * Now find the largest tsize (up to what the guest
                         * requested) that will cover gfn, stay within the
                         * range, and for which gfn and pfn are mutually
                         * aligned.
                         */

                        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                                unsigned long gfn_start, gfn_end, tsize_pages;
                                tsize_pages = 1 << (tsize - 2);

                                gfn_start = gfn & ~(tsize_pages - 1);
                                gfn_end = gfn_start + tsize_pages;

                                if (gfn_start + pfn - gfn < start)
                                        continue;
                                if (gfn_end + pfn - gfn > end)
                                        continue;
                                if ((gfn & (tsize_pages - 1)) !=
                                    (pfn & (tsize_pages - 1)))
                                        continue;

                                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                                pfn &= ~(tsize_pages - 1);
                                break;
                        }
                } else if (vma && hva >= vma->vm_start &&
                           (vma->vm_flags & VM_HUGETLB)) {
                        unsigned long psize = vma_kernel_pagesize(vma);

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * Take the largest page size that satisfies both host
                         * and guest mapping
                         */
                        tsize = min(__ilog2(psize) - 10, tsize);

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
                }

                up_read(&current->mm->mmap_sem);
        }

        if (likely(!pfnmap)) {
                unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
                pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_noslot_pfn(pfn)) {
                        printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                               (long)gfn);
                        return;
                }

                /* Align guest and physical address to page map boundaries */
                pfn &= ~(tsize_pages - 1);
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }

        /* Drop old ref and setup new one. */
        kvmppc_e500_ref_release(ref);
        kvmppc_e500_ref_setup(ref, gtlbe, pfn);

        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);

        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);

        /* Drop refcount on page, so that mmu notifiers can clear it */
        kvm_release_pfn_clean(pfn);
}
/* XXX only map the one-one case, for now use TLB0 */
static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                                 int esel,
                                 struct kvm_book3e_206_tlb_entry *stlbe)
{
        struct kvm_book3e_206_tlb_entry *gtlbe;
        struct tlbe_ref *ref;

        gtlbe = get_entry(vcpu_e500, 0, esel);
        ref = &vcpu_e500->gtlb_priv[0][esel].ref;

        kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                        get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                        gtlbe, 0, stlbe, ref);
}
/* Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
        struct tlbe_ref *ref;
        unsigned int victim;

        victim = vcpu_e500->host_tlb1_nv++;

        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;

        ref = &vcpu_e500->tlb_refs[1][victim];
        kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);

        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        if (vcpu_e500->h2g_tlb1_rmap[victim]) {
                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
        }
        vcpu_e500->h2g_tlb1_rmap[victim] = esel;

        return victim;
}
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int size = vcpu_e500->gtlb_params[1].entries;
        unsigned int offset;
        gva_t eaddr;
        int i;

        vcpu_e500->tlb1_min_eaddr = ~0UL;
        vcpu_e500->tlb1_max_eaddr = 0;
        offset = vcpu_e500->gtlb_offset[1];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + i];

                if (!get_tlb_v(tlbe))
                        continue;

                eaddr = get_tlb_eaddr(tlbe);
                vcpu_e500->tlb1_min_eaddr =
                        min(vcpu_e500->tlb1_min_eaddr, eaddr);

                eaddr = get_tlb_end(tlbe);
                vcpu_e500->tlb1_max_eaddr =
                        max(vcpu_e500->tlb1_max_eaddr, eaddr);
        }
}
static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
                                struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned long start, end, size;

        size = get_tlb_bytes(gtlbe);
        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
        end = start + size - 1;

        return vcpu_e500->tlb1_min_eaddr == start ||
               vcpu_e500->tlb1_max_eaddr == end;
}
/* This function is supposed to be called for adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
                                struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned long start, end, size;
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        if (!get_tlb_v(gtlbe))
                return;

        size = get_tlb_bytes(gtlbe);
        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
        end = start + size - 1;

        vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
        vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}
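/*
 * Mark a guest TLB entry invalid (unless it is IPROT protected) and
 * refresh the cached TLB1 address range if this entry bounded it.
 */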
static inline int kvmppc_e500_gtlbe_invalidate(
                                struct kvmppc_vcpu_e500 *vcpu_e500,
                                int tlbsel, int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);

        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;

        if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
                kvmppc_recalc_tlb1map_range(vcpu_e500);

        gtlbe->mas1 = 0;

        return 0;
}
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
        int esel;

        if (value & MMUCSR0_TLB0FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
        if (value & MMUCSR0_TLB1FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

        /* Invalidate all vcpu id mappings */
        kvmppc_e500_tlbil_all(vcpu_e500);

        return EMULATE_DONE;
}
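/*
 * Emulate tlbivax: invalidate either every entry in the selected guest
 * TLB or the single entry matching the effective address, then drop all
 * host mappings for this vcpu.
 */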
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int ia;
        int esel, tlbsel;

        ia = (ea >> 2) & 0x1;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (ea >> 3) & 0x1;

        if (ia) {
                /* invalidate all entries */
                for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
                     esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        } else {
                ea &= 0xfffff000;
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                                get_cur_pid(vcpu), -1);
                if (esel >= 0)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        /* Invalidate all vcpu id mappings */
        kvmppc_e500_tlbil_all(vcpu_e500);

        return EMULATE_DONE;
}
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                       int pid, int type)
{
        struct kvm_book3e_206_tlb_entry *tlbe;
        int tid, esel;

        /* invalidate all entries */
        for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
                tlbe = get_entry(vcpu_e500, tlbsel, esel);
                tid = get_tlb_tid(tlbe);
                if (type == 0 || tid == pid) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                }
        }
}
static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
                       gva_t ea)
{
        int tlbsel, esel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
                if (esel >= 0) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                        break;
                }
        }
}
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int pid = get_cur_spid(vcpu);

        if (type == 0 || type == 1) {
                tlbilx_all(vcpu_e500, 0, pid, type);
                tlbilx_all(vcpu_e500, 1, pid, type);
        } else if (type == 3) {
                tlbilx_one(vcpu_e500, pid, ea);
        }

        return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, esel;
        struct kvm_book3e_206_tlb_entry *gtlbe;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
        vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
        vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = gtlbe->mas1;
        vcpu->arch.shared->mas2 = gtlbe->mas2;
        vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

        return EMULATE_DONE;
}
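/*
 * Emulate tlbsx: on a hit, load the shared MAS registers from the matching
 * guest entry; on a miss, set them up for a subsequent tlbwe, as the
 * hardware would.
 */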
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int as = !!get_cur_sas(vcpu);
        unsigned int pid = get_cur_spid(vcpu);
        int esel, tlbsel;
        struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
                if (esel >= 0) {
                        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
                        break;
                }
        }

        if (gtlbe) {
                esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 = gtlbe->mas1;
                vcpu->arch.shared->mas2 = gtlbe->mas2;
                vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
        } else {
                int victim;

                /* since we only have two TLBs, only lower bit is used. */
                tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
                victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
                        | MAS0_ESEL(victim)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 =
                          (vcpu->arch.shared->mas6 & MAS6_SPID0)
                        | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
                        | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
                vcpu->arch.shared->mas2 &= MAS2_EPN;
                vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
                                           MAS2_ATTRIB_MASK;
                vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
                                             MAS3_U2 | MAS3_U3;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
        return EMULATE_DONE;
}
/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                        struct kvm_book3e_206_tlb_entry *gtlbe,
                        struct kvm_book3e_206_tlb_entry *stlbe,
                        int stlbsel, int sesel)
{
        int stid;

        preempt_disable();
        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
        preempt_enable();
}
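/*
 * Emulate tlbwe: commit the shared MAS registers into the guest TLB and,
 * if the entry is host-safe, pre-map a shadow entry in the host TLB.
 */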
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel, esel, stlbsel, sesel;
        int recal = 0;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        if (get_tlb_v(gtlbe)) {
                inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                if ((tlbsel == 1) &&
                        kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
                        recal = 1;
        }

        gtlbe->mas1 = vcpu->arch.shared->mas1;
        gtlbe->mas2 = vcpu->arch.shared->mas2;
        if (!(vcpu->arch.shared->msr & MSR_CM))
                gtlbe->mas2 &= 0xffffffffUL;
        gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

        trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                      gtlbe->mas2, gtlbe->mas7_3);

        if (tlbsel == 1) {
                /*
                 * If a valid tlb1 entry is overwritten then recalculate the
                 * min/max TLB1 map address range otherwise no need to look
                 * in tlb1 array.
                 */
                if (recal)
                        kvmppc_recalc_tlb1map_range(vcpu_e500);
                else
                        kvmppc_set_tlb1map_range(vcpu, gtlbe);
        }

        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                u64 eaddr;
                u64 raddr;

                switch (tlbsel) {
                case 0:
                        /* TLB0 */
                        gtlbe->mas1 &= ~MAS1_TSIZE(~0);
                        gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

                        stlbsel = 0;
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                        sesel = 0; /* unused */

                        break;

                case 1:
                        /* TLB1 */
                        eaddr = get_tlb_eaddr(gtlbe);
                        raddr = get_tlb_raddr(gtlbe);

                        /* Create a 4KB mapping on the host.
                         * If the guest wanted a large page,
                         * only the first 4KB is mapped here and the rest
                         * are mapped on the fly. */
                        stlbsel = 1;
                        sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
                                        raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
                        break;

                default:
                        BUG();
                }

                write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
}
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
                gva_t eaddr, unsigned int pid, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel, tlbsel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
                if (esel >= 0)
                        return index_of(tlbsel, esel);
        }

        return -1;
}
951 int kvmppc_core_vcpu_translate(struct kvm_vcpu
*vcpu
,
952 struct kvm_translation
*tr
)
959 eaddr
= tr
->linear_address
;
960 pid
= (tr
->linear_address
>> 32) & 0xff;
961 as
= (tr
->linear_address
>> 40) & 0x1;
963 index
= kvmppc_e500_tlb_search(vcpu
, eaddr
, pid
, as
);
969 tr
->physical_address
= kvmppc_mmu_xlate(vcpu
, index
, eaddr
);
970 /* XXX what does "writeable" and "usermode" even mean? */
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}
int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}
void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
                       gva_t eaddr)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe;
        u64 pgmask;

        gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
        pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
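/*
 * Called on a guest TLB hit that missed in the host TLB: build and write
 * the shadow entry for the already-valid guest mapping.
 */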
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                    unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe_priv *priv;
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);
        int stlbsel, sesel;

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        switch (tlbsel) {
        case 0:
                stlbsel = 0;
                sesel = 0; /* unused */
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

                /* Only triggers after clear_tlb_refs */
                if (unlikely(!(priv->ref.flags & E500_TLB_VALID)))
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                else
                        kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                                &priv->ref, eaddr, &stlbe);
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;

                stlbsel = 1;
                sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
                                             gtlbe, &stlbe, esel);
                break;
        }

        default:
                BUG();
                break;
        }

        write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        /*
         * Flush all shadow tlb entries everywhere. This is slow, but
         * we are 100% sure that we catch the to be unmapped page
         */
        kvm_flush_remote_tlbs(kvm);

        return 0;
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* kvm_unmap_hva flushes everything anyways */
        kvm_unmap_hva(kvm, start);

        return 0;
}
int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        kvm_unmap_hva(kvm, hva);
}
/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int i;

        clear_tlb1_bitmap(vcpu_e500);
        kfree(vcpu_e500->g2h_tlb1_map);
        clear_tlb_refs(vcpu_e500);
        kfree(vcpu_e500->gtlb_priv[0]);
        kfree(vcpu_e500->gtlb_priv[1]);

        if (vcpu_e500->shared_tlb_pages) {
                vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
                                          PAGE_SIZE)));

                for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
                        set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
                        put_page(vcpu_e500->shared_tlb_pages[i]);
                }

                vcpu_e500->num_shared_tlb_pages = 0;

                kfree(vcpu_e500->shared_tlb_pages);
                vcpu_e500->shared_tlb_pages = NULL;
        } else {
                kfree(vcpu_e500->gtlb_arch);
        }

        vcpu_e500->gtlb_arch = NULL;
}
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.mas0 = vcpu->arch.shared->mas0;
        sregs->u.e.mas1 = vcpu->arch.shared->mas1;
        sregs->u.e.mas2 = vcpu->arch.shared->mas2;
        sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
        sregs->u.e.mas4 = vcpu->arch.shared->mas4;
        sregs->u.e.mas6 = vcpu->arch.shared->mas6;

        sregs->u.e.mmucfg = vcpu->arch.mmucfg;
        sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
        sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
        sregs->u.e.tlbcfg[2] = 0;
        sregs->u.e.tlbcfg[3] = 0;
}
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
                vcpu->arch.shared->mas0 = sregs->u.e.mas0;
                vcpu->arch.shared->mas1 = sregs->u.e.mas1;
                vcpu->arch.shared->mas2 = sregs->u.e.mas2;
                vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
                vcpu->arch.shared->mas4 = sregs->u.e.mas4;
                vcpu->arch.shared->mas6 = sregs->u.e.mas6;
        }

        return 0;
}
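/*
 * Configure the guest TLB from a userspace-supplied description: validate
 * the requested geometry, pin and vmap the shared entry array, and switch
 * the vcpu over to it.
 */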
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_params params;
        char *virt;
        struct page **pages;
        struct tlbe_priv *privs[2] = {};
        u64 *g2h_bitmap = NULL;
        size_t array_len;
        u32 sets;
        int num_pages, ret, i;

        if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
                return -EINVAL;

        if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
                           sizeof(params)))
                return -EFAULT;

        if (params.tlb_sizes[1] > 64)
                return -EINVAL;
        if (params.tlb_ways[1] != params.tlb_sizes[1])
                return -EINVAL;
        if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
                return -EINVAL;
        if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
                return -EINVAL;

        if (!is_power_of_2(params.tlb_ways[0]))
                return -EINVAL;

        sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
        if (!is_power_of_2(sets))
                return -EINVAL;

        array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
        array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

        if (cfg->array_len < array_len)
                return -EINVAL;

        num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
                    cfg->array / PAGE_SIZE;
        pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
        if (ret < 0)
                goto err_pages;

        if (ret != num_pages) {
                num_pages = ret;
                ret = -EFAULT;
                goto err_put_page;
        }

        virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
        if (!virt) {
                ret = -ENOMEM;
                goto err_put_page;
        }

        privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
                           GFP_KERNEL);
        privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
                           GFP_KERNEL);

        if (!privs[0] || !privs[1]) {
                ret = -ENOMEM;
                goto err_privs;
        }

        g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
                             GFP_KERNEL);
        if (!g2h_bitmap) {
                ret = -ENOMEM;
                goto err_privs;
        }

        free_gtlb(vcpu_e500);

        vcpu_e500->gtlb_priv[0] = privs[0];
        vcpu_e500->gtlb_priv[1] = privs[1];
        vcpu_e500->g2h_tlb1_map = g2h_bitmap;

        vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
                (virt + (cfg->array & (PAGE_SIZE - 1)));

        vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
        vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

        vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

        vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        if (params.tlb_sizes[0] <= 2048)
                vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
        vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
        vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

        vcpu_e500->shared_tlb_pages = pages;
        vcpu_e500->num_shared_tlb_pages = num_pages;

        vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
        vcpu_e500->gtlb_params[0].sets = sets;

        vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
        vcpu_e500->gtlb_params[1].sets = 1;

        kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;

err_privs:
        kfree(privs[0]);
        kfree(privs[1]);

err_put_page:
        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);

err_pages:
        kfree(pages);
        return ret;
}
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *dirty)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_recalc_tlb1map_range(vcpu_e500);
        clear_tlb_refs(vcpu_e500);
        return 0;
}
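/*
 * Per-vcpu setup: read the host TLB geometry, allocate the default guest
 * TLB arrays and the guest<->host tracking structures, and initialize the
 * emulated TLBnCFG registers.
 */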
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
        int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
        int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        /*
         * This should never happen on real e500 hardware, but is
         * architecturally possible -- e.g. in some weird nested
         * virtualization case.
         */
        if (host_tlb_params[0].entries == 0 ||
            host_tlb_params[1].entries == 0) {
                pr_err("%s: need to know host tlb size\n", __func__);
                return -ENODEV;
        }

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
        host_tlb_params[1].ways = host_tlb_params[1].entries;

        if (!is_power_of_2(host_tlb_params[0].entries) ||
            !is_power_of_2(host_tlb_params[0].ways) ||
            host_tlb_params[0].entries < host_tlb_params[0].ways ||
            host_tlb_params[0].ways == 0) {
                pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
                       __func__, host_tlb_params[0].entries,
                       host_tlb_params[0].ways);
                return -ENODEV;
        }

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;

        vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
        vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

        vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
        vcpu_e500->gtlb_params[0].sets =
                KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

        vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
        vcpu_e500->gtlb_params[1].sets = 1;

        vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
        if (!vcpu_e500->gtlb_arch)
                return -ENOMEM;

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

        vcpu_e500->tlb_refs[0] =
                kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
                        GFP_KERNEL);
        if (!vcpu_e500->tlb_refs[0])
                goto err;

        vcpu_e500->tlb_refs[1] =
                kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
                        GFP_KERNEL);
        if (!vcpu_e500->tlb_refs[1])
                goto err;

        vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
                                          vcpu_e500->gtlb_params[0].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[0])
                goto err;

        vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
                                          vcpu_e500->gtlb_params[1].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[1])
                goto err;

        vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
                                          vcpu_e500->gtlb_params[1].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->g2h_tlb1_map)
                goto err;

        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
                                           host_tlb_params[1].entries,
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
                goto err;

        /* Init TLB configuration register */
        vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
        vcpu->arch.tlbcfg[0] |=
                vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
        vcpu->arch.tlbcfg[1] |=
                vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

        kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;

err:
        free_gtlb(vcpu_e500);
        kfree(vcpu_e500->tlb_refs[0]);
        kfree(vcpu_e500->tlb_refs[1]);
        return -ENOMEM;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        free_gtlb(vcpu_e500);
        kfree(vcpu_e500->h2g_tlb1_rmap);
        kfree(vcpu_e500->tlb_refs[0]);
        kfree(vcpu_e500->tlb_refs[1]);
}