// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int prot_virt_host;
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

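/*
 * Handle the "prot_virt" kernel command line parameter. Hosting protected
 * guests is only enabled if it is requested, if we are not ourselves a
 * protected guest, and if the hardware provides the facility (158).
 */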
static int __init prot_virt_setup(char *val)
{
	bool enabled;
	int rc;

	rc = kstrtobool(val, &enabled);
	if (!rc && enabled)
		prot_virt_host = 1;

	if (is_prot_virt_guest() && prot_virt_host) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.");
	}

	if (prot_virt_host && !test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.");
	}

	return rc;
}
early_param("prot_virt", prot_virt_setup);

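/*
 * Hand the reserved base storage over to the Ultravisor via the init
 * Ultravisor call. On failure the rc/rrc reported by the Ultravisor are
 * logged.
 */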
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

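/*
 * Allocate the base storage for the Ultravisor: 1MB aligned and below 2GB.
 * If the allocation or the init call fails, support for protected
 * virtualization is disabled on the host.
 */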
void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization");
	prot_virt_host = 0;
}

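/*
 * Cap the usable memory limit at the highest secure storage address
 * supported by the Ultravisor.
 */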
void adjust_to_uv_max(unsigned long *vmax)
{
	*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page can not be a huge page for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}

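/*
 * Issue the Ultravisor call described by @uvcb for the page mapped at
 * @addr. The page reference count is frozen at its expected value around
 * the call so that no additional references can be taken while the page
 * changes its secure/non-secure state.
 */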
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	down_read(&gmap->mm->mmap_sem);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace is playing dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	up_read(&gmap->mm->mmap_sem);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

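/*
 * Request to convert (import) the guest page at @gaddr into a secure page
 * for the secure guest identified by the gmap's guest handle.
 */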
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
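/* Expose the Ultravisor query information under /sys/firmware/uv/query/ */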
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			uv_info.inst_calls_list[0],
			uv_info.inst_calls_list[1],
			uv_info.inst_calls_list[2],
			uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_guest_cpus);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lx\n",
			uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

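/*
 * Create the "uv" kobject under /sys/firmware and the "query" kset with
 * the attribute group defined above. Everything is unwound again if one
 * of the steps fails.
 */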
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif