// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

#if IS_ENABLED(CONFIG_KVM)
int prot_virt_host;
EXPORT_SYMBOL(prot_virt_host);
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);
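
/*
 * Handle the "prot_virt" kernel command line parameter: enabling it
 * requests protected virtualization host support, which is only kept
 * enabled when the hardware provides the ultravisor call facility (158)
 * and we are not ourselves running as a protected guest.
 */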
static int __init prot_virt_setup(char *val)
{
	bool enabled;
	int rc;

	rc = kstrtobool(val, &enabled);
	if (!rc && enabled)
		prot_virt_host = 1;

	if (is_prot_virt_guest() && prot_virt_host) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.");
	}

	if (prot_virt_host && !test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.");
	}

	return rc;
}
early_param("prot_virt", prot_virt_setup);
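
/*
 * Donate the memory at stor_base/stor_len to the ultravisor and have it
 * initialize its base storage with the UVC_CMD_INIT_UV ultravisor call.
 */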
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
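
/*
 * Allocate and donate the base storage the ultravisor needs for its own
 * bookkeeping: 1 MB aligned and below 2 GB. If the allocation or the UV
 * init call fails, protected virtualization host support is disabled.
 */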
void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization");
	prot_virt_host = 0;
}
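
/*
 * Cap the maximum address to the highest secure storage address the
 * ultravisor supports, so that all usable memory can be made secure.
 */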
void adjust_to_uv_max(unsigned long *vmax)
{
	*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page can not be a huge page for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
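
/*
 * Perform the actual UV call on the page mapped at addr: the page's
 * reference count is frozen at the expected value so that no new
 * references can be taken while the ultravisor operates on it. Called
 * with the page table lock held (ptep comes from get_locked_pte()).
 */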
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	down_read(&gmap->mm->mmap_sem);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace is playing dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	up_read(&gmap->mm->mmap_sem);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
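
/*
 * Convenience wrapper around gmap_make_secure(): build a convert-to-secure
 * (import) UV call control block for the given guest address and submit it.
 */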
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif
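
/*
 * sysfs interface: expose the ultravisor query information (facilities
 * and limits) under /sys/firmware/uv/query/. Available to protected
 * guests as well as to KVM hosts.
 */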
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			uv_info.inst_calls_list[0],
			uv_info.inst_calls_list[1],
			uv_info.inst_calls_list[2],
			uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_guest_cpus);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lx\n",
			uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);
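
/*
 * Collect all query attributes in one attribute group so they can be
 * registered with a single sysfs_create_group() call.
 */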
static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;
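
/*
 * Create the "uv" kobject under /sys/firmware and the "query" kset below
 * it, then attach the query attribute group. Nothing is registered when
 * the ultravisor call facility (158) is not available.
 */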
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif