treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers/xen/mem-reservation.c
blob 3782cf070338e3fa5f830184a784dd9e2d0c666a
// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */
#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
#include <linux/moduleparam.h>

bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
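
/*
 * Example (sketch): this flag is consumed by the scrub helper in
 * include/xen/mem-reservation.h, which looks roughly like the snippet
 * below, so that pages handed back to Xen are cleared first when
 * scrubbing is enabled. Illustrative only; kept out of the build with #if 0.
 */
#if 0
static inline void xenmem_reservation_scrub_page(struct page *page)
{
        if (xen_scrub_pages)
                clear_highpage(page);
}
#endif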

/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
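/*
 * For example, on x86 XEN_PAGE_SIZE == PAGE_SIZE, so XEN_PFN_PER_PAGE == 1
 * and EXTENT_ORDER == fls(1) - 1 == 0, i.e. each extent covers exactly one
 * page.
 */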

#ifdef CONFIG_XEN_HAVE_PVMMU
void __xenmem_reservation_va_mapping_update(unsigned long count,
                                            struct page **pages,
                                            xen_pfn_t *frames)
{
        int i;

        for (i = 0; i < count; i++) {
                struct page *page = pages[i];
                unsigned long pfn = page_to_pfn(page);

                BUG_ON(!page);

                /*
                 * We don't support PV MMU when Linux and Xen are using
                 * different page granularity.
                 */
                BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                set_phys_to_machine(pfn, frames[i]);

                /* Link back into the page tables if not highmem. */
                if (!PageHighMem(page)) {
                        int ret;

                        ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        mfn_pte(frames[i], PAGE_KERNEL),
                                        0);
                        BUG_ON(ret);
                }
        }
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);

void __xenmem_reservation_va_mapping_reset(unsigned long count,
                                           struct page **pages)
{
        int i;

        for (i = 0; i < count; i++) {
                struct page *page = pages[i];
                unsigned long pfn = page_to_pfn(page);

                /*
                 * We don't support PV MMU when Linux and Xen are using
                 * different page granularity.
                 */
                BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                if (!PageHighMem(page)) {
                        int ret;

                        ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        __pte_ma(0), 0);
                        BUG_ON(ret);
                }
                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        }
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */
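
/*
 * Example (sketch): most callers use the wrappers declared in
 * include/xen/mem-reservation.h rather than the __ variants above. Roughly
 * (the exact gating may differ between kernel versions), the update wrapper
 * only does PV-MMU work on non-auto-translated guests:
 */
#if 0
static inline void xenmem_reservation_va_mapping_update(unsigned long count,
                                                         struct page **pages,
                                                         xen_pfn_t *frames)
{
#ifdef CONFIG_XEN_HAVE_PVMMU
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                __xenmem_reservation_va_mapping_update(count, pages, frames);
#endif
}
#endif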

/* @frames is an array of PFNs */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = EXTENT_ORDER,
                .domid        = DOMID_SELF
        };

        /* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
        set_xen_guest_handle(reservation.extent_start, frames);
        reservation.nr_extents = count;
        return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
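
/*
 * Example (sketch): a typical "balloon up" caller fills @frames with Xen
 * PFNs for freshly allocated pages, asks Xen to back them, and then
 * restores the VA mappings. The helper name below is hypothetical and the
 * snippet is illustrative only.
 */
#if 0
static int example_populate(struct page **pages, xen_pfn_t *frames, int nr)
{
        int i, rc;

        for (i = 0; i < nr; i++)
                frames[i] = page_to_xen_pfn(pages[i]);

        /* Returns the number of extents Xen actually populated. */
        rc = xenmem_reservation_increase(nr, frames);
        if (rc <= 0)
                return -ENOMEM;

        xenmem_reservation_va_mapping_update(rc, pages, frames);
        return rc;
}
#endif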

/* @frames is an array of GFNs */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = EXTENT_ORDER,
                .domid        = DOMID_SELF
        };

        /* XENMEM_decrease_reservation requires a GFN */
        set_xen_guest_handle(reservation.extent_start, frames);
        reservation.nr_extents = count;
        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
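
/*
 * Example (sketch): the reverse "balloon down" path scrubs the pages, tears
 * down their VA mappings, and only then hands the frames back to Xen. The
 * helper name below is hypothetical and the snippet is illustrative only.
 */
#if 0
static int example_depopulate(struct page **pages, xen_pfn_t *frames, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                xenmem_reservation_scrub_page(pages[i]);
                frames[i] = xen_page_to_gfn(pages[i]);
        }

        xenmem_reservation_va_mapping_reset(nr, pages);

        /* Returns the number of extents actually released. */
        return xenmem_reservation_decrease(nr, frames);
}
#endif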