/*
 * Copyright (C) 2011. Freescale Inc. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Hypercall handling for running PAPR guests in PR KVM on Book 3S
 * processors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/anon_inodes.h>

#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

#define HPTE_SIZE	16	/* bytes per HPT entry */
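
/*
 * Compute the guest-real address of the PTE group (PTEG) containing
 * pte_index, based on the guest's SDR1 (hashed page table origin and size).
 */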
static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	unsigned long pteg_addr;

	pte_index <<= 4;	/* convert PTE index to a byte offset (HPTE_SIZE == 16) */
	pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
	pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg_addr |= pte_index;

	return pteg_addr;
}
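
/* H_ENTER: insert a new HPTE into the guest's hashed page table. */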
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
	long flags = kvmppc_get_gpr(vcpu, 4);
	long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long pteg[2 * 8];
	unsigned long pteg_addr, i, *hpte;
	long int ret = H_PTEG_FULL;

	pteg_addr = get_pteg_addr(vcpu, pte_index);

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
	hpte = pteg;

	if (likely((flags & H_EXACT) == 0)) {
		/* No exact slot requested: scan the PTEG for a free entry. */
		for (i = 0; ; ++i) {
			if (i == 8)
				goto done;
			if ((*hpte & HPTE_V_VALID) == 0)
				break;
			hpte += 2;
		}
	} else {
		/* H_EXACT: the low three bits of pte_index pick the slot. */
		i = pte_index & 7;
		hpte += i * 2;
		if (*hpte & HPTE_V_VALID)
			goto done;
	}

	hpte[0] = kvmppc_get_gpr(vcpu, 6);
	hpte[1] = kvmppc_get_gpr(vcpu, 7);
	pteg_addr += i * HPTE_SIZE;
	copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
	kvmppc_set_gpr(vcpu, 4, pte_index | i);
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
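
/* H_REMOVE: invalidate one HPTE and flush the corresponding TLB entry. */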
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long v = 0, pteg, rb;
	unsigned long pte[2];
	long int ret = H_NOT_FOUND;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pte, (void __user *)pteg, sizeof(pte));

	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
		goto done;

	/* Invalidate the entry by zeroing its first doubleword in the guest HPT. */
	copy_to_user((void __user *)pteg, &v, sizeof(v));

	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

	ret = H_SUCCESS;
	kvmppc_set_gpr(vcpu, 4, pte[0]);
	kvmppc_set_gpr(vcpu, 5, pte[1]);

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}

/* Request defs for kvmppc_h_pr_bulk_remove() */
#define H_BULK_REMOVE_TYPE		0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST		0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE	0x8000000000000000ULL
#define   H_BULK_REMOVE_END		0xc000000000000000ULL
#define H_BULK_REMOVE_CODE		0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS		0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND	0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM		0x2000000000000000ULL
#define   H_BULK_REMOVE_HW		0x3000000000000000ULL
#define H_BULK_REMOVE_RC		0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS		0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE	0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND		0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN		0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX		0x00ffffffffffffffULL
#define H_BULK_REMOVE_MAX_BATCH		4
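
/*
 * H_BULK_REMOVE: process up to H_BULK_REMOVE_MAX_BATCH two-word requests
 * taken from the guest's registers, writing a response code back into the
 * first word of each request.
 */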
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
	int i;
	int paramnr = 4;
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
		unsigned long pteg, rb, flags;
		unsigned long pte[2];
		unsigned long v = 0;

		if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
			break; /* Exit success */
		} else if ((tsh & H_BULK_REMOVE_TYPE) !=
			   H_BULK_REMOVE_REQUEST) {
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
		tsh |= H_BULK_REMOVE_RESPONSE;

		if ((tsh & H_BULK_REMOVE_ANDCOND) &&
		    (tsh & H_BULK_REMOVE_AVPN)) {
			tsh |= H_BULK_REMOVE_PARM;
			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
		copy_from_user(pte, (void __user *)pteg, sizeof(pte));

		/* tsl = AVPN */
		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

		if ((pte[0] & HPTE_V_VALID) == 0 ||
		    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
		    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
			tsh |= H_BULK_REMOVE_NOT_FOUND;
		} else {
			/* Splat the pteg in (userland) hpt */
			copy_to_user((void __user *)pteg, &v, sizeof(v));

			rb = compute_tlbie_rb(pte[0], pte[1],
					      tsh & H_BULK_REMOVE_PTEX);
			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
			tsh |= H_BULK_REMOVE_SUCCESS;
			tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
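
/* H_PROTECT: update the protection and key bits of an existing HPTE. */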
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long rb, pteg, r, v;
	unsigned long pte[2];
	long int ret = H_NOT_FOUND;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pte, (void __user *)pteg, sizeof(pte));

	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
		goto done;

	v = pte[0];
	r = pte[1];
	/* Replace the protection and key bits with the caller's flags. */
	r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
	       HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
	pte[1] = r;

	rb = compute_tlbie_rb(v, r, pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
	copy_to_user((void __user *)pteg, pte, sizeof(pte));
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
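
/* H_PUT_TCE: hand the TCE update over to the common TCE handler. */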
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
	long rc;

	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}
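
/* Forward XICS interrupt controller hypercalls to the in-kernel XICS emulation. */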
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
	long rc = kvmppc_xics_hcall(vcpu, cmd);
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}
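
/* Top-level PAPR hypercall dispatcher for PR KVM. */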
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
{
	switch (cmd) {
	case H_ENTER:
		return kvmppc_h_pr_enter(vcpu);
	case H_REMOVE:
		return kvmppc_h_pr_remove(vcpu);
	case H_PROTECT:
		return kvmppc_h_pr_protect(vcpu);
	case H_BULK_REMOVE:
		return kvmppc_h_pr_bulk_remove(vcpu);
	case H_PUT_TCE:
		return kvmppc_h_pr_put_tce(vcpu);
	case H_CEDE:
		/* Guest cedes the vcpu: enable interrupts and block until woken. */
		vcpu->arch.shared->msr |= MSR_EE;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		vcpu->stat.halt_wakeup++;
		return EMULATE_DONE;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu))
			return kvmppc_h_pr_xics_hcall(vcpu, cmd);
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;
		if (kvmppc_rtas_hcall(vcpu))
			break;
		kvmppc_set_gpr(vcpu, 3, 0);
		return EMULATE_DONE;
	}

	return EMULATE_FAIL;
}