// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 */

#include <linux/anon_inodes.h>

#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define HPTE_SIZE	16		/* bytes per HPT entry */
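
/*
 * In PR KVM the guest's hashed page table lives in userspace (VMM)
 * memory, so the HPT manipulation below goes through
 * copy_from_user()/copy_to_user(). A PTE group (PTEG) is 8 HPTEs,
 * i.e. 8 * HPTE_SIZE = 128 bytes.
 */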

static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        unsigned long pteg_addr;

        pte_index <<= 4;
        pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
        pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg_addr |= pte_index;

        return pteg_addr;
}
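
/*
 * Illustrative note: SDR1 holds HTABORG (the HPT base, preserved by
 * the 0xfffffffffffc0000 mask) and HTABSIZE in its low 5 bits. The
 * pte_index mask keeps (HTABSIZE + 11) PTEG-select bits starting at
 * bit 7 plus bits 4-6 for the slot within the PTEG, after scaling
 * the index by HPTE_SIZE (the << 4).
 */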

static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
        long flags = kvmppc_get_gpr(vcpu, 4);
        long pte_index = kvmppc_get_gpr(vcpu, 5);
        __be64 pteg[2 * 8];
        __be64 *hpte;
        unsigned long pteg_addr, i;
        long int ret;

        i = pte_index & 7;
        pte_index &= ~7UL;
        pteg_addr = get_pteg_addr(vcpu, pte_index);

        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        ret = H_FUNCTION;
        if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
                goto done;
        hpte = pteg;

        ret = H_PTEG_FULL;
        if (likely((flags & H_EXACT) == 0)) {
                for (i = 0; ; ++i) {
                        if (i == 8)
                                goto done;
                        if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0)
                                break;
                        hpte += 2;
                }
        } else {
                hpte += i * 2;
                if (be64_to_cpu(*hpte) & HPTE_V_VALID)
                        goto done;
        }

        hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
        hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
        pteg_addr += i * HPTE_SIZE;
        ret = H_FUNCTION;
        if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
                goto done;
        kvmppc_set_gpr(vcpu, 4, pte_index | i);
        ret = H_SUCCESS;

done:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}
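
/*
 * H_ENTER register convention as handled above: r4 = flags, r5 =
 * PTEG index, r6/r7 = the two doublewords of the new HPTE. With
 * H_EXACT the low 3 bits of r5 select the slot; otherwise the first
 * invalid slot is used. The slot chosen is returned in r4 together
 * with H_SUCCESS in r3.
 */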

static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
        unsigned long v = 0, pteg, rb;
        unsigned long pte[2];
        long int ret;

        pteg = get_pteg_addr(vcpu, pte_index);
        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        ret = H_FUNCTION;
        if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
                goto done;
        pte[0] = be64_to_cpu((__force __be64)pte[0]);
        pte[1] = be64_to_cpu((__force __be64)pte[1]);

        ret = H_NOT_FOUND;
        if ((pte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
                goto done;

        ret = H_FUNCTION;
        if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
                goto done;

        rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

        ret = H_SUCCESS;
        kvmppc_set_gpr(vcpu, 4, pte[0]);
        kvmppc_set_gpr(vcpu, 5, pte[1]);

done:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}
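
/*
 * On a successful H_REMOVE the old HPTE is returned to the guest in
 * r4/r5, the entry's first doubleword is zeroed in the userspace
 * HPT, and the stale translation is flushed through the per-vcpu
 * mmu.tlbie() callback.
 */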

/* Request defs for kvmppc_h_pr_bulk_remove() */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH        4
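
/*
 * Each bulk-remove request is a (tsh, tsl) register pair: the top
 * byte of tsh encodes the request type on input and the completion
 * code on output, H_BULK_REMOVE_PTEX carries the PTEG index, and
 * tsl supplies the value for the AVPN/ANDCOND comparisons.
 */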

static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
        int i;
        int paramnr = 4;
        int ret = H_SUCCESS;

        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
                unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
                unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
                unsigned long pteg, rb, flags;
                unsigned long pte[2];
                unsigned long v = 0;

                if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
                        break; /* Exit success */
                } else if ((tsh & H_BULK_REMOVE_TYPE) !=
                           H_BULK_REMOVE_REQUEST) {
                        ret = H_PARAMETER;
                        break; /* Exit fail */
                }

                tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
                tsh |= H_BULK_REMOVE_RESPONSE;

                if ((tsh & H_BULK_REMOVE_ANDCOND) &&
                    (tsh & H_BULK_REMOVE_AVPN)) {
                        tsh |= H_BULK_REMOVE_PARM;
                        kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
                        ret = H_PARAMETER;
                        break; /* Exit fail */
                }

                pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
                if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
                        ret = H_FUNCTION;
                        break;
                }
                pte[0] = be64_to_cpu((__force __be64)pte[0]);
                pte[1] = be64_to_cpu((__force __be64)pte[1]);

                /* tsl = AVPN */
                flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

                if ((pte[0] & HPTE_V_VALID) == 0 ||
                    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
                    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
                        tsh |= H_BULK_REMOVE_NOT_FOUND;
                } else {
                        /* Splat the pteg in (userland) hpt */
                        if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
                                ret = H_FUNCTION;
                                break;
                        }

                        rb = compute_tlbie_rb(pte[0], pte[1],
                                              tsh & H_BULK_REMOVE_PTEX);
                        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
                        tsh |= H_BULK_REMOVE_SUCCESS;
                        tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
                }
                kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
        }
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}
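
/*
 * Up to H_BULK_REMOVE_MAX_BATCH request pairs arrive in r4..r11
 * (paramnr = 4, pair i in r(4+2i)/r(5+2i)). Each tsh is rewritten
 * in place with H_BULK_REMOVE_RESPONSE plus a completion code, so
 * the guest reads per-entry results from the registers it passed
 * the requests in.
 */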

static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
        unsigned long flags = kvmppc_get_gpr(vcpu, 4);
        unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
        unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
        unsigned long rb, pteg, r, v;
        unsigned long pte[2];
        long int ret;

        pteg = get_pteg_addr(vcpu, pte_index);
        mutex_lock(&vcpu->kvm->arch.hpt_mutex);
        ret = H_FUNCTION;
        if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
                goto done;
        pte[0] = be64_to_cpu((__force __be64)pte[0]);
        pte[1] = be64_to_cpu((__force __be64)pte[1]);

        ret = H_NOT_FOUND;
        if ((pte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
                goto done;

        v = pte[0];
        r = pte[1];
        r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
               HPTE_R_KEY_LO);
        r |= (flags << 55) & HPTE_R_PP0;
        r |= (flags << 48) & HPTE_R_KEY_HI;
        r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        pte[1] = r;

        rb = compute_tlbie_rb(v, r, pte_index);
        vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
        pte[0] = (__force u64)cpu_to_be64(pte[0]);
        pte[1] = (__force u64)cpu_to_be64(pte[1]);
        ret = H_FUNCTION;
        if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
                goto done;
        ret = H_SUCCESS;

done:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        kvmppc_set_gpr(vcpu, 3, ret);

        return EMULATE_DONE;
}
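
/*
 * H_PROTECT rewrites only the protection bits of the second
 * doubleword: the PAPR flags word is shifted by 55 to line up with
 * HPTE_R_PP0 and by 48 for HPTE_R_KEY_HI, while PP, N and KEY_LO
 * are taken from the flags unshifted.
 */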

static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
{
        long rc;

        rc = kvmppc_h_logical_ci_load(vcpu);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
{
        long rc;

        rc = kvmppc_h_logical_ci_store(vcpu);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}
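
/*
 * For both cache-inhibited load and store, H_TOO_HARD from the
 * generic helper means the access cannot be completed in-kernel;
 * EMULATE_FAIL then forwards the hcall to userspace rather than
 * reporting an error to the guest.
 */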

#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
        unsigned long tce = kvmppc_get_gpr(vcpu, 6);
        long rc;

        rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
        unsigned long tce = kvmppc_get_gpr(vcpu, 6);
        unsigned long npages = kvmppc_get_gpr(vcpu, 7);
        long rc;

        rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba,
                        tce, npages);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
        unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
        unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
        unsigned long tce_value = kvmppc_get_gpr(vcpu, 6);
        unsigned long npages = kvmppc_get_gpr(vcpu, 7);
        long rc;

        rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
        if (rc == H_TOO_HARD)
                return EMULATE_FAIL;
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

#else /* CONFIG_SPAPR_TCE_IOMMU */
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
        return EMULATE_FAIL;
}

static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
{
        return EMULATE_FAIL;
}

static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
{
        return EMULATE_FAIL;
}
#endif /* CONFIG_SPAPR_TCE_IOMMU */

static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
        long rc = kvmppc_xics_hcall(vcpu, cmd);
        kvmppc_set_gpr(vcpu, 3, rc);
        return EMULATE_DONE;
}

int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
        int rc, idx;

        if (cmd <= MAX_HCALL_OPCODE &&
            !test_bit(cmd/4, vcpu->kvm->arch.enabled_hcalls))
                return EMULATE_FAIL;

        switch (cmd) {
        case H_ENTER:
                return kvmppc_h_pr_enter(vcpu);
        case H_REMOVE:
                return kvmppc_h_pr_remove(vcpu);
        case H_PROTECT:
                return kvmppc_h_pr_protect(vcpu);
        case H_BULK_REMOVE:
                return kvmppc_h_pr_bulk_remove(vcpu);
        case H_PUT_TCE:
                return kvmppc_h_pr_put_tce(vcpu);
        case H_PUT_TCE_INDIRECT:
                return kvmppc_h_pr_put_tce_indirect(vcpu);
        case H_STUFF_TCE:
                return kvmppc_h_pr_stuff_tce(vcpu);
        case H_CEDE:
                kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
                kvm_vcpu_block(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                vcpu->stat.halt_wakeup++;
                return EMULATE_DONE;
        case H_LOGICAL_CI_LOAD:
                return kvmppc_h_pr_logical_ci_load(vcpu);
        case H_LOGICAL_CI_STORE:
                return kvmppc_h_pr_logical_ci_store(vcpu);
        case H_XIRR:
        case H_CPPR:
        case H_EOI:
        case H_IPI:
        case H_IPOLL:
        case H_XIRR_X:
                if (kvmppc_xics_enabled(vcpu))
                        return kvmppc_h_pr_xics_hcall(vcpu, cmd);
                break;
        case H_RTAS:
                if (list_empty(&vcpu->kvm->arch.rtas_tokens))
                        break;
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                rc = kvmppc_rtas_hcall(vcpu);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                if (rc)
                        break;
                kvmppc_set_gpr(vcpu, 3, 0);
                return EMULATE_DONE;
        }

        return EMULATE_FAIL;
}
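
/*
 * Hcall opcodes are multiples of 4, so cmd / 4 indexes the
 * enabled_hcalls bitmap checked at the top of kvmppc_h_pr(). Any
 * opcode that is disabled, unknown, or declined (e.g. XICS hcalls
 * without an in-kernel XICS) falls through to EMULATE_FAIL and is
 * left to userspace.
 */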

int kvmppc_hcall_impl_pr(unsigned long cmd)
{
        switch (cmd) {
        case H_ENTER:
        case H_REMOVE:
        case H_PROTECT:
        case H_BULK_REMOVE:
        case H_PUT_TCE:
        case H_PUT_TCE_INDIRECT:
        case H_STUFF_TCE:
        case H_CEDE:
        case H_LOGICAL_CI_LOAD:
        case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
        case H_XIRR:
        case H_CPPR:
        case H_EOI:
        case H_IPI:
        case H_IPOLL:
        case H_XIRR_X:
#endif
                return 1;
        }
        return 0;
}

/*
 * List of hcall numbers to enable by default.
 * For compatibility with old userspace, we enable by default
 * all hcalls that were implemented before the hcall-enabling
 * facility was added. Note this list should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
        H_ENTER,
        H_REMOVE,
        H_PROTECT,
        H_BULK_REMOVE,
        H_PUT_TCE,
        H_CEDE,
#ifdef CONFIG_KVM_XICS
        H_XIRR,
        H_CPPR,
        H_EOI,
        H_IPI,
        H_IPOLL,
        H_XIRR_X,
#endif
        0
};

void kvmppc_pr_init_default_hcalls(struct kvm *kvm)
{
        int i;
        unsigned int hcall;

        for (i = 0; default_hcall_list[i]; ++i) {
                hcall = default_hcall_list[i];
                WARN_ON(!kvmppc_hcall_impl_pr(hcall));
                __set_bit(hcall / 4, kvm->arch.enabled_hcalls);
        }
}