/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu <yu.liu@freescale.com>
 *         Scott Wood <scottwood@freescale.com>
 *         Ashish Kalra <ashish.kalra@freescale.com>
 *         Varun Sethi <varun.sethi@freescale.com>
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.h and
 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
 * Copyright IBM Corp. 2007-2008
 */
#ifndef KVM_E500_H
#define KVM_E500_H

#include <linux/kvm_host.h>
#include <asm/nohash/mmu-book3e.h>
#include <asm/cputhreads.h>

enum vcpu_ftr {
	VCPU_FTR_MMU_V2
};

#define E500_PID_NUM   3
#define E500_TLB_NUM   2
/* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID		(1 << 31)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP		(1 << 30)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0		(1 << 29)
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
#define E500_TLB_MAS2_ATTR	(0x7f)
struct tlbe_ref {
	kvm_pfn_t pfn;		/* valid only for TLB0, except briefly */
	unsigned int flags;	/* E500_TLB_* */
};

struct tlbe_priv {
	struct tlbe_ref ref;
};
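/*
 * Illustrative only (not taken verbatim from this file): the flags word in
 * struct tlbe_ref mixes state bits with cached MAS2 attributes, so a
 * TLB0-backed mapping might be recorded roughly as
 *
 *	ref->flags = E500_TLB_VALID | (gtlbe->mas2 & E500_TLB_MAS2_ATTR);
 *
 * i.e. the high bits say how the entry is shadowed in the host TLB and the
 * low seven bits remember the guest's X0/X1/WIMGE attributes.
 */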
#ifdef CONFIG_KVM_E500V2
struct vcpu_id_table;
#endif
struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};
struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	unsigned int gtlb_nv[E500_TLB_NUM];

	unsigned int host_tlb1_nv;

	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	unsigned int *h2g_tlb1_rmap;

	/* Minimum and maximum address mapped by TLB1 */
	unsigned long tlb1_min_eaddr;
	unsigned long tlb1_max_eaddr;

#ifdef CONFIG_KVM_E500V2
	u32 pid[E500_PID_NUM];

	struct vcpu_id_table *idt;
#endif
};
static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
}
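/*
 * Usage sketch (illustrative only): anywhere a generic struct kvm_vcpu is
 * handed in, the e500-specific state is recovered via the embedded member,
 * e.g.
 *
 *	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 *	struct kvm_book3e_206_tlb_entry *gtlb = vcpu_e500->gtlb_arch;
 *
 * This works only because struct kvm_vcpu is embedded (not pointed to) in
 * struct kvmppc_vcpu_e500, which is what container_of() relies on.
 */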
/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE  16
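/*
 * Worked example of the default geometry above: TLB0 has 2 ways of 128
 * entries each, so KVM_E500_TLB0_SIZE evaluates to 128 * 2 = 256 guest TLB0
 * entries, while the fully-associative TLB1 holds 16 entries.
 */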
#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)
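/*
 * Illustrative round trip through the macros above: index_of(1, 5) packs
 * TLB1/entry 5 as (1 << 16) | 5 == 0x10005, and the decoders recover the
 * pieces, tlbsel_of(0x10005) == 1 and esel_of(0x10005) == 5.
 */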
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1 | MAS2_E | MAS2_G)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
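/*
 * Reading the masks above: the two permission masks split MAS3 into
 * user-mode (UX/UR/UW) and supervisor-mode (SX/SR/SW) execute/read/write
 * bits, while the *_ATTRIB_MASK macros gather the attribute bits of MAS2
 * (X0/X1/E/G) and of MAS3 (U0-U3 plus all of the permission bits above).
 */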
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);
int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
				union kvmppc_one_reg *val);

#ifdef CONFIG_KVM_E500V2
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif
/* TLB helper functions */
static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 7) & 0x1f;
}

static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas2 & MAS2_EPN;
}

static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	unsigned int pgsize = get_tlb_size(tlbe);
	return 1ULL << 10 << pgsize;
}

static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	u64 bytes = get_tlb_bytes(tlbe);
	return get_tlb_eaddr(tlbe) + bytes - 1;
}
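/*
 * Worked example for the size helpers above (numbers follow directly from
 * the code, not from any particular manual): if the TSIZE field extracted by
 * get_tlb_size() is 2, get_tlb_bytes() returns 1ULL << 10 << 2 == 4096, i.e.
 * a 4 KiB page, and get_tlb_end() then yields get_tlb_eaddr(tlbe) + 4095,
 * the last byte covered by the mapping.
 */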
static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & ~0xfffULL;
}

static inline unsigned int
get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 16) & 0xff;
}

static inline unsigned int
get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 12) & 0x1;
}

static inline unsigned int
get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 31) & 0x1;
}

static inline unsigned int
get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 30) & 0x1;
}

static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
}
static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pid & 0xff;
}

static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
}

static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & MSR_PR);
}

static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
}

static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas6 & 0x1;
}
static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
{
	/*
	 * The manual says that tlbsel is 2 bits wide.
	 * Since we only have two TLBs, only the lower bit is used.
	 */
	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
}
static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas0 & 0xfff;
}

static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
}
static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
			const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

#ifndef CONFIG_KVM_BOOKE_HV
	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;
#endif

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}
static inline struct kvm_book3e_206_tlb_entry *get_entry(
	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
{
	int offset = vcpu_e500->gtlb_offset[tlbsel];
	return &vcpu_e500->gtlb_arch[offset + entry];
}
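/*
 * Sketch of how the pieces above fit together (illustrative only): both
 * guest TLBs live back to back in the flat gtlb_arch[] array, so looking up
 * entry 5 of TLB1 amounts to
 *
 *	struct kvm_book3e_206_tlb_entry *gtlbe = get_entry(vcpu_e500, 1, 5);
 *
 * which indexes gtlb_arch[] at gtlb_offset[1] + 5.
 */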
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
#ifdef CONFIG_KVM_BOOKE_HV
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)	get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu)			get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe)			(gtlbe->mas1 & MAS1_TS)

/*
 * These functions should be called with preemption disabled, and the
 * returned value is valid only in that context.
 */
static inline int get_thread_specific_lpid(int vm_lpid)
{
	int vcpu_lpid = vm_lpid;

	if (threads_per_core == 2)
		vcpu_lpid |= smp_processor_id() & 1;

	return vcpu_lpid;
}

static inline int get_lpid(struct kvm_vcpu *vcpu)
{
	return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
}
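/*
 * Illustration of get_thread_specific_lpid() (assumes, as the code implies,
 * that a VM's base lpid has its low bit clear when threads_per_core == 2,
 * and that sibling threads have adjacent CPU numbers): a VM with lpid 4
 * would run under LPID 4 on even-numbered CPUs and LPID 5 on odd-numbered
 * ones, so the two threads of a core never share an LPID. With any other
 * threads_per_core value the lpid is returned unchanged.
 */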
#else
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe);

static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;

	return vcpu_e500->pid[tidseld];
}
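/*
 * Reading get_tlbmiss_tid() above: the TID-select field of guest MAS4
 * (TIDSELD, bits 16-19 as masked here) chooses which of the vcpu's PID
 * registers supplies the TID for a TLB miss, e.g. tidseld == 0 picks
 * vcpu_e500->pid[0]. pid[] has E500_PID_NUM (3) entries, matching the
 * PID0-PID2 registers of e500v2.
 */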
/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe)	(MAS1_TS)

#endif /* !BOOKE_HV */
static inline bool has_feature(const struct kvm_vcpu *vcpu,
			       enum vcpu_ftr ftr)
{
	bool has_ftr;

	switch (ftr) {
	case VCPU_FTR_MMU_V2:
		has_ftr = ((vcpu->arch.mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2);
		break;
	default:
		return false;
	}

	return has_ftr;
}
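/*
 * Usage sketch (illustrative only): callers gate MMU-architecture-v2
 * behaviour on the feature test above, e.g.
 *
 *	if (has_feature(vcpu, VCPU_FTR_MMU_V2))
 *		... handle the MAV 2.0 TLB layout ...
 *
 * which is true only when the guest-visible MMUCFG reports MAVN == V2.
 */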
#endif /* KVM_E500_H */