/*
 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"

#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;
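
/*
 * Shadow TLB1 entries are installed into the host TLB1 from the top
 * down: shadow index 0 maps to host entry (tlb1_entry_num - 1), via
 * to_htlb1_esel(). Host entries below tlbcam_index are left to the
 * host's own CAM mappings (see tlb1_max_shadow_size()).
 */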

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Shadow TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[0]++;
	if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->guest_tlb_nv[0] = 0;

	return victim;
}
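
/*
 * TLB0 victim selection is a simple round-robin over the ways of a
 * set: guest_tlb_nv[0] wraps at KVM_E500_TLB0_WAY_NUM. The same
 * counter is reported back to the guest as the next-victim (NV) hint
 * in MAS0.
 */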

static inline unsigned int tlb1_max_shadow_size(void)
{
	return tlb1_entry_num - tlbcam_index;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
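
/*
 * MAS3 permission bits come in supervisor/user pairs (SR/UR, SW/UW,
 * SX/UX), with each user bit one position above its supervisor
 * counterpart, so "(mas3 & E500_TLB_SUPER_PERM_MASK) << 1" grants the
 * guest kernel's rights to user mode, where the guest actually runs.
 * The unconditional OR of E500_TLB_SUPER_PERM_MASK preserves the host
 * kernel's own access to the page.
 */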

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}
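
/*
 * On SMP the M (memory coherence required) bit is forced on so that
 * guest mappings remain coherent if the vcpu migrates between cores.
 */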

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct tlbe *stlbe)
{
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	__asm__ __volatile__ ("tlbwe\n" : : );
}
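
/*
 * tlbwe takes its destination (TLB select and entry select) from
 * MAS0, which __write_host_tlbe() deliberately leaves untouched:
 * write_host_tlbe() below programs MAS0 explicitly for TLB1 writes
 * and relies on the current MAS0 contents for TLB0.
 */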

static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	local_irq_disable();
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe);
	} else {
		unsigned register mas0;

		mas0 = mfspr(SPRN_MAS0);

		mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
		__write_host_tlbe(stlbe);

		mtspr(SPRN_MAS0, mas0);
	}
	local_irq_enable();
}
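
/*
 * The MAS registers are live host state; interrupts are disabled
 * across the sequence so that interrupt-time TLB handling cannot
 * clobber them between the mtspr sequence and the tlbwe.
 */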

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int i;
	unsigned register mas0;

	/* Load all valid TLB1 entries to reduce guest TLB miss faults */
	local_irq_disable();
	mas0 = mfspr(SPRN_MAS0);
	for (i = 0; i < tlb1_max_shadow_size(); i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];

		if (get_tlb_v(stlbe)) {
			mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
					| MAS0_ESEL(to_htlb1_esel(i)));
			__write_host_tlbe(stlbe);
		}
	}
	mtspr(SPRN_MAS0, mas0);
	local_irq_enable();
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
	_tlbil_all();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return i;
	}

	return -1;
}
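
/*
 * A TID of zero in an entry matches any PID, and callers may pass
 * as == -1 to match either address space (tlbivax emulation, for
 * example, invalidates regardless of TS).
 */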

static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
	struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];

	if (page) {
		vcpu_e500->shadow_pages[tlbsel][esel] = NULL;

		if (get_tlb_v(stlbe)) {
			if (tlbe_is_writable(stlbe))
				kvm_release_page_dirty(page);
			else
				kvm_release_page_clean(page);
		}
	}
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
	stlbe->mas1 = 0;
	trace_kvm_stlb_inval(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			stlbe->mas3, stlbe->mas7);
}

static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, gva_t eend, u32 tid)
{
	unsigned int pid = tid & 0xff;
	unsigned int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
		write_host_tlbe(vcpu_e500, 1, i);
	}
}
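
/*
 * write_host_tlbe() is called after the invalidate so that the now
 * zeroed shadow entry overwrites, and thereby evicts, the stale
 * translation still sitting in the corresponding host TLB1 slot.
 */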

static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}
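
/*
 * A hardware TLB miss preloads the MAS registers from the MAS4
 * defaults; the shifts above extract the same default fields (TLB
 * select, PID select, TSIZE) from the guest's MAS4 image so that the
 * guest's miss handler sees what real hardware would give it.
 */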

static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;

	stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	/* Get reference to new page. */
	new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
				(long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

	vcpu_e500->shadow_pages[tlbsel][esel] = new_page;

	/* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.msr & MSR_PR);
	stlbe->mas3 = (hpaddr & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.msr & MSR_PR);
	stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;

	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			stlbe->mas3, stlbe->mas7);
}
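
/*
 * e500 physical addresses can be wider than 32 bits: the low 32 bits
 * of the host physical address go into MAS3's RPN field, and the
 * remaining high bits land in MAS7.
 */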

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, tlbsel, esel);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[1]++;

	if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->guest_tlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

	return victim;
}
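
/*
 * Shadow TLB1 slots are handed out round-robin, so inserting a new
 * mapping may evict an unrelated guest mapping; the evicted
 * translation simply faults back in on its next use.
 */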

/* Invalidate all guest kernel mappings when entering usermode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	if (usermode) {
		struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
		int i;

		/* XXX Replace loop with fancy data structures. */
		for (i = 0; i < tlb1_max_shadow_size(); i++)
			kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

		_tlbil_all();
	}
}

static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1)
		kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
				get_tlb_end(gtlbe),
				get_tlb_tid(gtlbe));
	else
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = 0;

	return 0;
}
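
/*
 * Entries with IPROT set are exempt from invalidation here, matching
 * the e500's protection of IPROT entries against flash invalidates.
 */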

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	_tlbil_all();

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb];

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	_tlbil_all();

	return EMULATE_DONE;
}
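
/*
 * For tlbivax the effective address doubles as an operand: bit 2
 * requests "invalidate all" and bit 3 selects the TLB, as decoded
 * above, while the page index comes from the upper EPN bits.
 */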

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = vcpu->arch.gpr[rb];

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| ((vcpu_e500->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	return EMULATE_DONE;
}
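
/*
 * On a miss, tlbsx leaves MAS0-MAS3 preloaded with a suggested victim
 * slot and the MAS4 default attributes, so the guest can follow up
 * with a tlbwe directly, just as the hardware search instruction
 * behaves.
 */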

int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	u64 eaddr;
	u64 raddr;
	u32 tid;
	struct tlbe *gtlbe;
	int tlbsel, esel, stlbsel, sesel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (get_tlb_v(gtlbe) && tlbsel == 1) {
		eaddr = get_tlb_eaddr(gtlbe);
		tid = get_tlb_tid(gtlbe);
		kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
				get_tlb_end(gtlbe), tid);
	}

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);
			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel);
	}

	return EMULATE_DONE;
}
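
/*
 * All shadow mappings are created 4KB at a time: TLB0 entries are
 * forced to 4KB above, and TLB1 large pages are demand-mapped 4KB by
 * 4KB, so a single guest entry may fan out into many shadow entries.
 */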

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, i;

	for (tlbsel = 0; tlbsel < 2; tlbsel++)
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
			kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

	/* discard all guest mappings */
	_tlbil_all();
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		struct tlbe *gtlbe
			= &vcpu_e500->guest_tlb[tlbsel][esel];

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
		break;
	}

	default:
		BUG();
		break;
	}
	write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->guest_tlb[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->guest_tlb[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}
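
/*
 * The identity-mapped 0xe0004500 above is presumably the NS16550 UART
 * on the reference boards this code targets (CCSR space + 0x4500),
 * i.e. the serial console address the guest kernel's boot wrapper
 * expects.
 */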

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->guest_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[0] == NULL)
		goto err_out;

	vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->shadow_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[0] == NULL)
		goto err_out_guest0;

	vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->guest_tlb[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[1] == NULL)
		goto err_out_shadow0;

	vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
	vcpu_e500->shadow_tlb[1] =
		kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[1] == NULL)
		goto err_out_guest1;

	vcpu_e500->shadow_pages[0] = (struct page **)
		kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[0] == NULL)
		goto err_out_shadow1;

	vcpu_e500->shadow_pages[1] = (struct page **)
		kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[1] == NULL)
		goto err_out_page0;

	return 0;

err_out_page0:
	kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
	kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
	kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
	kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
	kfree(vcpu_e500->guest_tlb[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->shadow_pages[1]);
	kfree(vcpu_e500->shadow_pages[0]);
	kfree(vcpu_e500->shadow_tlb[1]);
	kfree(vcpu_e500->guest_tlb[1]);
	kfree(vcpu_e500->shadow_tlb[0]);
	kfree(vcpu_e500->guest_tlb[0]);
}