// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include "kvm-s390.h"
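
/*
 * Destroy the secure CPU backing @vcpu via the Destroy Secure CPU UV call
 * and release the resources that were donated to the Ultravisor. The
 * donated CPU storage is only freed when the UV call succeeded; on failure
 * it is intentionally leaked, since the Ultravisor may still treat it as
 * secure memory.
 */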
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc = 0;

	if (kvm_s390_pv_cpu_get_handle(vcpu)) {
		cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
				   UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

		KVM_UV_EVENT(vcpu->kvm, 3,
			     "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
			     vcpu->vcpu_id, *rc, *rrc);
		WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x",
			  *rc, *rrc);
	}
	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page(sida_origin(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? -EIO : 0;
}
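
/*
 * Create a secure CPU for @vcpu: donate the CPU storage of
 * uv_info.guest_cpu_stor_len bytes, allocate the Secure Instruction Data
 * Area (SIDA) and issue the Create Secure CPU UV call. On success, the CPU
 * and configuration handles are recorded in the SIE control block and
 * sdf = 2 switches the block to the protected-VM format.
 */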
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = (u64)vcpu->arch.sie_block;
	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

	/* Alloc Secure Instruction Data Area Designation */
	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.sie_block->sidad) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
}
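
/*
 * Allocate the base and variable storage donated to the Ultravisor when
 * the secure configuration is created. The base storage has a fixed
 * length; the variable storage scales with the guest's memory size, at
 * uv_info.guest_virt_var_stor_len bytes per HPAGE_SIZE of guest storage,
 * plus a fixed uv_info.guest_virt_base_stor_len.
 */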
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;
	struct kvm_memory_slot *memslot;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN
	 */
	mutex_lock(&kvm->slots_lock);
	memslot = kvm_memslots(kvm)->memslots;
	npages = memslot->base_gfn + memslot->npages;
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}
/* this should not fail, but if it does, we must not free the donated memory */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	/* make all pages accessible before destroying the guest */
	s390_reset_acc(kvm->mm);

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	atomic_set(&kvm->mm->context.is_protected, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}
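
/*
 * Create a secure configuration for @kvm: allocate and donate the base and
 * variable storage, then issue the Create Secure Configuration UV call. If
 * the call fails but the Ultravisor already created a (partial) secure
 * configuration (UVC_RC_NEED_DESTROY), that configuration has to be torn
 * down again before the donated memory may be freed.
 */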
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = (unsigned long)kvm->arch.sca;
	uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		else
			kvm_s390_pv_dealloc_vm(kvm);
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	atomic_set(&kvm->mm->context.is_protected, 1);
	return 0;
}
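
/*
 * Hand the SE header of the encrypted boot image over to the Ultravisor
 * via the Set Secure Configuration Parameters UV call. @hdr and @length
 * describe a kernel buffer holding the header as provided by userspace.
 */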
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	return cc ? -EINVAL : 0;
}
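
/*
 * Unpack one page of the encrypted boot image into guest memory at @addr.
 * The tweak values select the page-specific decryption tweak;
 * gmap_make_secure() resolves the page and drives the Unpack UV call.
 */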
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}
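
/*
 * Unpack the encrypted boot image, one page at a time, starting at guest
 * address @addr. Typically reached via the KVM_PV_UNPACK subcommand of the
 * KVM_S390_PV_COMMAND ioctl. -EAGAIN from unpack_one() is retried unless a
 * fatal signal is pending.
 */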
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
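
/*
 * Set the state (e.g. stopped or operating) of the secure CPU backing
 * @vcpu via the Set Secure CPU State UV call.
 */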
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd	= UVC_CMD_CPU_SET_STATE,
		.header.len	= sizeof(uvcb),
		.cpu_handle	= kvm_s390_pv_cpu_get_handle(vcpu),
		.state		= state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}