// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

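/*
 * All requests below go through uv_call() (declared in asm/uv.h), which
 * issues the Ultravisor call instruction and returns non-zero when the
 * request is rejected; the return and reason codes are then found in the
 * control block header (uvcb.header.rc / uvcb.header.rrc).
 */
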
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

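/*
 * setup_uv() runs during early boot of the host: it checks the
 * prerequisites (prot_virt_host enabled, typically via the prot_virt=
 * kernel parameter handled by the boot code, not running as a protected
 * guest, facility 158 available), reserves the Ultravisor base storage
 * area with memblock and donates it via uv_init().  Any failure disables
 * protected virtualization support again.
 */
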
void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	/*
	 * keep these conditions in line with kasan init code has_uv_sec_stor_limit()
	 */
	if (!is_prot_virt_host())
		return;

	if (is_prot_virt_guest()) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.");
		return;
	}

	if (!test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.");
		return;
	}

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization");
	prot_virt_host = 0;
}

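/*
 * Clamp the maximum usable address to the highest secure-storage address
 * the Ultravisor reports, if it reports a limit at all.
 */
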
void adjust_to_uv_max(unsigned long *vmax)
{
	if (uv_info.max_sec_stor_addr)
		*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}

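/*
 * The helpers below drive the page states used for protected guests:
 * uv_pin_shared() keeps a shared page shared, uv_destroy_page() and
 * uv_convert_from_secure() give a secure page back to the host (zeroed,
 * respectively encrypted/exported), and gmap_make_secure() further down
 * imports a page into the secure guest.
 */
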
/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

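/*
 * arch_make_page_accessible() below tries pinning first: if the page is
 * already shared by the guest, pinning keeps it readable by the host
 * without having to export it.
 */
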
/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page can not be a huge page, for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}

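/*
 * Example: for a page that is mapped by a single process and also sits in
 * the swap cache, expected_page_refs() returns page_mapcount() + 1.  Only
 * if the actual refcount matches this value can make_secure_pte() freeze
 * the references and issue the UV call safely.
 */
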
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;

	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}

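/*
 * make_secure_pte() is called by gmap_make_secure() with the page locked
 * and the pte lock held.  Its return value tells the caller how to
 * proceed: -EAGAIN (writeback in flight, wait for it), -EBUSY (unexpected
 * extra references, drain the pagevecs), -ENXIO (not mapped, fault it in).
 */
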
/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace is playing dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

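/*
 * gmap_convert_to_secure() is the convenience wrapper for the common case:
 * import the page at @gaddr into the secure guest that owns @gmap.
 */
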
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

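/*
 * A minimal caller sketch (hypothetical; the real KVM unpack path adds its
 * own bookkeeping and error handling): retry as long as the conversion
 * reports -EAGAIN, e.g.
 *
 *	do {
 *		rc = gmap_convert_to_secure(gmap, gaddr);
 *		if (rc == -EAGAIN)
 *			cond_resched();
 *	} while (rc == -EAGAIN);
 */
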
/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

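/*
 * The remainder of the file exports the UV query information to userspace
 * through sysfs, below /sys/firmware/uv/query/.
 */
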
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			uv_info.inst_calls_list[0],
			uv_info.inst_calls_list[1],
			uv_info.inst_calls_list[2],
			uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_guest_cpus);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

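/*
 * uv_info_init() creates the "uv" kobject under /sys/firmware and a "query"
 * kset beneath it, so that the attributes defined above appear as the
 * read-only files facilities, max_cpus, max_guests and max_address.
 */
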
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif