/*
 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"

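/*
 * Shadow TLB1 entries are allocated from the top of the host TLB1
 * downwards: to_htlb1_esel() maps shadow index 0 to host entry
 * (tlb1_entry_num - 1), keeping guest mappings clear of the tlbcam
 * entries the host kernel pins at the bottom of TLB1.
 */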
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Shadow TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

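/*
 * TLB0 victim selection is a simple round-robin over the ways of a
 * set: the counter wraps at KVM_E500_TLB0_WAY_NUM, and its value is
 * also what gets reported back to the guest as NV (next victim) in
 * MAS0.
 */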
static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[0]++;
	if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->guest_tlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	return tlb1_entry_num - tlbcam_index;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

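/*
 * MAS3 carries each permission as a supervisor/user bit pair (SR/UR,
 * SW/UW, SX/UX), with the user bit one position above its supervisor
 * counterpart, hence the "<< 1" below: e.g. a guest entry granting
 * only supervisor read/write yields user read/write in the shadow
 * entry while the guest kernel is running.
 */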
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}

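/*
 * On SMP hosts the M (memory coherence) bit is forced on so guest
 * mappings stay coherent across CPUs; everything outside
 * MAS2_ATTRIB_MASK is dropped.
 */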
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct tlbe *stlbe)
{
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	__asm__ __volatile__ ("tlbwe\n" : : );
}

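/*
 * A TLB1 write must target a specific host entry, so MAS0 is saved,
 * pointed at the host slot computed by to_htlb1_esel(), and restored
 * afterwards; TLB0 writes go out with MAS0 as-is.
 */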
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	local_irq_disable();
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe);
	} else {
		unsigned register mas0;

		mas0 = mfspr(SPRN_MAS0);

		mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
		__write_host_tlbe(stlbe);

		mtspr(SPRN_MAS0, mas0);
	}
	local_irq_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int i;
	unsigned register mas0;

	/* Load all valid TLB1 entries to reduce guest tlb miss fault */
	local_irq_disable();
	mas0 = mfspr(SPRN_MAS0);
	for (i = 0; i < tlb1_max_shadow_size(); i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];

		if (get_tlb_v(stlbe)) {
			mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
					| MAS0_ESEL(to_htlb1_esel(i)));
			__write_host_tlbe(stlbe);
		}
	}
	mtspr(SPRN_MAS0, mas0);
	local_irq_enable();
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
	_tlbil_all();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return i;
	}

	return -1;
}

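/*
 * Releasing a shadow entry drops the host page reference taken by
 * gfn_to_page() in kvmppc_e500_shadow_map(); pages that were mapped
 * writable are marked dirty so KVM's dirty tracking sees the update.
 */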
static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
	struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];

	if (page) {
		vcpu_e500->shadow_pages[tlbsel][esel] = NULL;

		if (get_tlb_v(stlbe)) {
			if (tlbe_is_writable(stlbe))
				kvm_release_page_dirty(page);
			else
				kvm_release_page_clean(page);
		}
	}
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
	stlbe->mas1 = 0;
	KVMTRACE_5D(STLB_INVAL, &vcpu_e500->vcpu, index_of(tlbsel, esel),
			stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7,
			handler);
}

static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, gva_t eend, u32 tid)
{
	unsigned int pid = tid & 0xff;
	unsigned int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
		write_host_tlbe(vcpu_e500, 1, i);
	}
}

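/*
 * On a guest TLB miss, preload the MAS registers with the values a
 * real e500 would derive from MAS4 (the miss-default register):
 * target TLB, next victim, default PID selection and page size, so
 * the guest's miss handler only has to fill in the translation.
 */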
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 8) & 0xf;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

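/*
 * Build the host (shadow) entry for a single guest page. Shadow
 * mappings are always 4KB and always TS=1: guest translations live
 * in address space 1 while the host keeps address space 0 for
 * itself.
 */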
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;

	stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	/* Get reference to new page. */
	new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

	vcpu_e500->shadow_pages[tlbsel][esel] = new_page;

	/* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(BOOKE_PAGESZ_4K)
		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.msr & MSR_PR);
	stlbe->mas3 = (hpaddr & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.msr & MSR_PR);
	stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;

	KVMTRACE_5D(STLB_WRITE, &vcpu_e500->vcpu, index_of(tlbsel, esel),
			stlbe->mas1, stlbe->mas2, stlbe->mas3, stlbe->mas7,
			handler);
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, tlbsel, esel);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[1]++;

	if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->guest_tlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

	return victim;
}

/* Invalidate all guest kernel mappings when entering usermode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	if (usermode) {
		struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
		int i;

		/* XXX Replace loop with fancy data structures. */
		for (i = 0; i < tlb1_max_shadow_size(); i++)
			kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

		_tlbil_all();
	}
}

static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1)
		kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
				get_tlb_end(gtlbe),
				get_tlb_tid(gtlbe));
	else
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	_tlbil_all();

	return EMULATE_DONE;
}

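/*
 * For tlbivax the computed effective address doubles as an operand:
 * bit 2 of the EA requests "invalidate all" and bit 3 selects the
 * TLB, which appears to match the e500 encoding of the instruction.
 */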
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb];

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	_tlbil_all();

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

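/*
 * tlbsx searches both guest TLBs for the given EA using the PID/AS
 * taken from MAS6. On a hit the MAS registers reflect the matched
 * entry; on a miss they are preloaded for a subsequent tlbwe,
 * mirroring the hardware behaviour.
 */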
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = vcpu->arch.gpr[rb];

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	return EMULATE_DONE;
}

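/*
 * tlbwe commits the guest's MAS registers into the guest TLB and,
 * when the mapping can be safely shadowed, immediately installs a
 * host entry: guest TLB0 writes map 4KB one-to-one into shadow TLB0,
 * while TLB1 writes get an initial 4KB shadow mapping with the rest
 * of a large page faulted in on demand via kvmppc_mmu_map().
 */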
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	u64 eaddr;
	u64 raddr;
	u32 tid;
	struct tlbe *gtlbe;
	int tlbsel, esel, stlbsel, sesel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (get_tlb_v(gtlbe) && tlbsel == 1) {
		eaddr = get_tlb_eaddr(gtlbe);
		tid = get_tlb_tid(gtlbe);
		kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
				get_tlb_end(gtlbe), tid);
	}

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	KVMTRACE_5D(GTLB_WRITE, vcpu, vcpu_e500->mas0,
			gtlbe->mas1, gtlbe->mas2, gtlbe->mas3, gtlbe->mas7,
			handler);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOKE_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel);
	}

	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, i;

	for (tlbsel = 0; tlbsel < 2; tlbsel++)
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
			kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

	/* discard all guest mapping */
	_tlbil_all();
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		struct tlbe *gtlbe
			= &vcpu_e500->guest_tlb[tlbsel][esel];

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
		break;
	}

	default:
		BUG();
		break;
	}
	write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

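/*
 * Seed the guest TLB with the mappings the guest expects at reset: a
 * large identity region for the kernel and a 4K I/O mapping,
 * presumably matching the UART the boot wrapper uses for early
 * console output.
 */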
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->guest_tlb[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->guest_tlb[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOKE_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

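/*
 * Guest TLB0/TLB1 sizes are fixed constants chosen by KVM, but the
 * shadow TLB1 is sized to the real host TLB1 (read from TLB1CFG)
 * since each shadow entry occupies a real host slot.
 */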
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->guest_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[0] == NULL)
		goto err_out;

	vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->shadow_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[0] == NULL)
		goto err_out_guest0;

	vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->guest_tlb[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[1] == NULL)
		goto err_out_shadow0;

	vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
	vcpu_e500->shadow_tlb[1] =
		kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[1] == NULL)
		goto err_out_guest1;

	vcpu_e500->shadow_pages[0] = (struct page **)
		kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[0] == NULL)
		goto err_out_shadow1;

	vcpu_e500->shadow_pages[1] = (struct page **)
		kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[1] == NULL)
		goto err_out_page0;

	return 0;

err_out_page0:
	kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
	kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
	kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
	kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
	kfree(vcpu_e500->guest_tlb[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->shadow_pages[1]);
	kfree(vcpu_e500->shadow_pages[0]);
	kfree(vcpu_e500->shadow_tlb[1]);
	kfree(vcpu_e500->guest_tlb[1]);
	kfree(vcpu_e500->shadow_tlb[0]);
	kfree(vcpu_e500->guest_tlb[0]);
}