// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

#if IS_ENABLED(CONFIG_KVM)
int prot_virt_host;
EXPORT_SYMBOL(prot_virt_host);
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);
static int __init prot_virt_setup(char *val)
{
	bool enabled;
	int rc;

	rc = kstrtobool(val, &enabled);
	if (!rc && enabled)
		prot_virt_host = 1;

	if (is_prot_virt_guest() && prot_virt_host) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.\n");
	}

	if (prot_virt_host && !test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.\n");
	}

	return rc;
}
early_param("prot_virt", prot_virt_setup);
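/*
 * Usage sketch (not part of this file): host-side protected virtualization
 * is requested by booting with "prot_virt=1" on the kernel command line,
 * e.g. in the zipl parameter line. A value that kstrtobool() rejects
 * leaves prot_virt_host untouched and is reported as a malformed early
 * option by the early-param machinery.
 */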
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
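/*
 * All Ultravisor calls in this file follow the same pattern: fill a
 * command-specific control block whose first member is the common
 * uv_cb_header (length, command code, and the rc/rrc result fields,
 * see asm/uv.h), then hand its address to uv_call(). A minimal sketch,
 * assuming the definitions from asm/uv.h:
 *
 *	struct uv_cb_cfs uvcb = {
 *		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
 *		.header.len = sizeof(uvcb),
 *		.paddr = paddr,
 *	};
 *	int cc = uv_call(0, (u64)&uvcb);	// 0 on success
 *	// on failure, uvcb.header.rc/rrc carry the Ultravisor reason codes
 */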
void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}
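/*
 * setup_uv() is expected to run once during early boot, from the arch
 * setup code, while memblock is still the allocator of record. Failure is
 * not fatal; it merely disables host-side protected virtualization.
 */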
void adjust_to_uv_max(unsigned long *vmax)
{
	*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
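/*
 * Clamping *vmax to uv_info.max_sec_stor_addr keeps the usable memory
 * limit inside the range the Ultravisor can make secure: a configured
 * limit above max_sec_stor_addr is simply reduced to it, so every
 * potential guest page stays convertible.
 */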
/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page cannot be a huge page, for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
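/*
 * Worked example: an anonymous page mapped by one process and present in
 * the swap cache has page_mapcount() == 1 plus one swap-cache reference,
 * so expected_page_refs() returns 2. make_secure_pte() below can then
 * freeze the refcount only if no one else (e.g. a concurrent gup) holds
 * an extra reference.
 */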
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}
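/*
 * The freeze/unfreeze pair above is the heart of the conversion protocol:
 * page_ref_freeze() drops the refcount to zero only when it exactly
 * matches the expected value, which rules out concurrent users (gup, I/O)
 * for the duration of the UV call. PG_arch_1 is set beforehand so that
 * arch_make_page_accessible() below knows the page may now be secure.
 */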
/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	down_read(&gmap->mm->mmap_sem);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace is playing dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	up_read(&gmap->mm->mmap_sem);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
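/*
 * gmap_convert_to_secure() doubles as the usage template for
 * gmap_make_secure(): build the command-specific control block, embed the
 * guest handle and address, and let gmap_make_secure() handle the retry
 * logic. Both symbols are exported for KVM, which drives these
 * conversions on behalf of protected guests (the call sites live outside
 * this file).
 */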
/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepages cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. as an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
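/*
 * arch_make_page_accessible() is the s390 backend of a generic mm hook:
 * common code calls it before the host touches page content (e.g. in the
 * writeback and get_user_pages paths), so that a possibly-secure page is
 * pinned shared or exported before any host access can fault.
 */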
#endif
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			uv_info.inst_calls_list[0],
			uv_info.inst_calls_list[1],
			uv_info.inst_calls_list[2],
			uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_guest_cpus);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n",
			uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return snprintf(page, PAGE_SIZE, "%lx\n",
			uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
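/*
 * The attribute group above lands under /sys/firmware/uv/query/. A quick
 * usage sketch from userspace (paths follow from the code above):
 *
 *	$ cat /sys/firmware/uv/query/max_cpus
 *	$ cat /sys/firmware/uv/query/facilities
 *
 * Each attribute is read-only (0444) and prints the corresponding
 * uv_info field in the format given by its show function.
 */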
#endif