// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include "kvm-s390.h"
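
/*
 * kvm_s390_pv_destroy_cpu - destroy the secure CPU backing @vcpu and free
 * the host memory that was donated for it. The Ultravisor return and
 * reason codes are reported through @rc and @rrc. If the UVC fails, the
 * cpu storage is deliberately leaked; see the comment in the body.
 */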
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc = 0;

	if (kvm_s390_pv_cpu_get_handle(vcpu)) {
		cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
				   UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

		KVM_UV_EVENT(vcpu->kvm, 3,
			     "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
			     vcpu->vcpu_id, *rc, *rrc);
		WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x",
			  *rc, *rrc);
	}
	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page(sida_origin(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? -EIO : 0;
}
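
/*
 * kvm_s390_pv_create_cpu - create a secure CPU for @vcpu by donating base
 * storage and a SIDA page to the Ultravisor and issuing the Create Secure
 * CPU UVC. On success the returned cpu handle is stored in the vcpu and
 * its SIE control block; on failure the donated memory is freed again via
 * kvm_s390_pv_destroy_cpu().
 */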
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = (u64)vcpu->arch.sie_block;
	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

	/* Alloc Secure Instruction Data Area Designation */
	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vcpu->arch.sie_block->sidad) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
}
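
/*
 * kvm_s390_pv_alloc_vm - allocate the base and variable storage that will
 * be donated to the Ultravisor for this guest. The variable storage is
 * sized from the current guest memory size; see the arithmetic note below.
 */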
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;
	struct kvm_memory_slot *memslot;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN
	 */
	mutex_lock(&kvm->slots_lock);
	memslot = kvm_memslots(kvm)->memslots;
	npages = memslot->base_gfn + memslot->npages;
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
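	/*
	 * Illustrative arithmetic for the computation above: with 4 KiB
	 * pages and an HPAGE_SIZE of 1 MiB, a 4 GiB guest gives
	 * (npages * PAGE_SIZE) / HPAGE_SIZE = 4096, so vlen is
	 * 4096 * guest_virt_var_stor_len rounded up to a page, plus
	 * guest_virt_base_stor_len.
	 */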
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}
/* this should not fail, but if it does, we must not free the donated memory */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	/* make all pages accessible before destroying the guest */
	s390_reset_acc(kvm->mm);

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	atomic_set(&kvm->mm->context.is_protected, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}
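
/*
 * kvm_s390_pv_init_vm - create the protected configuration for @kvm by
 * donating the previously allocated storage and issuing the Create Secure
 * Configuration UVC. On UVC failure, the configuration is destroyed first
 * if the Ultravisor flags it with UVC_RC_NEED_DESTROY; otherwise the
 * donated memory is simply freed again.
 */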
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = (unsigned long)kvm->arch.sca;
	uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		else
			kvm_s390_pv_dealloc_vm(kvm);
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	return 0;
}
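
/*
 * kvm_s390_pv_set_sec_parms - hand the security header of the boot image
 * at @hdr/@length to the Ultravisor so it can verify and later unpack the
 * image. Marks the mm as protected on success.
 */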
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	if (!cc)
		atomic_set(&kvm->mm->context.is_protected, 1);
	return cc ? -EINVAL : 0;
}
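
/*
 * unpack_one - make the guest page at @addr secure and decrypt it with
 * the given @tweak and @offset via the Unpack UVC. -EAGAIN from
 * gmap_make_secure() is passed on so the caller can retry the page.
 */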
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}
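
/*
 * kvm_s390_pv_unpack - unpack the image range one page at a time so that
 * rescheduling and fatal signals are honoured between pages. @addr and
 * @size must be page aligned.
 *
 * Usage sketch (illustrative only; in this tree the caller is the
 * KVM_PV_UNPACK ioctl handler in kvm-s390.c, and the names below are
 * made up):
 *
 *	u16 rc, rrc;
 *	int r = kvm_s390_pv_unpack(kvm, img_addr, img_size, img_tweak,
 *				   &rc, &rrc);
 *	if (r)
 *		pr_warn("unpack failed: r %d rc %x rrc %x\n", r, rc, rrc);
 */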
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			/* Reschedule and retry this page, unless a fatal
			 * signal is pending. */
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd = UVC_CMD_CPU_SET_STATE,
		.header.len = sizeof(uvcb),
		.cpu_handle = kvm_s390_pv_cpu_get_handle(vcpu),
		.state = state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}