// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>
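
/*
 * Unused unpopulated pages are kept on a simple LIFO free list,
 * linked through page->zone_device_data and protected by list_lock.
 */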
static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;
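
/*
 * Grow the free list by at least nr_pages: reserve a new IOMEM region,
 * remap it as MEMORY_DEVICE_GENERIC memory and push the new pages onto
 * the list. Called with list_lock held.
 */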
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				/* Undo: push the pages taken so far back on the list. */
				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
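
/*
 * Example usage (an illustrative sketch only, not part of the kernel
 * sources): a backend driver that needs struct pages to map another
 * domain's memory into could pair the two helpers like this. The
 * map_foreign_grants() function below is hypothetical.
 *
 *	struct page *pages[16];
 *	int rc;
 *
 *	rc = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *	if (rc)
 *		return rc;
 *
 *	rc = map_foreign_grants(pages, ARRAY_SIZE(pages));
 *	if (rc) {
 *		xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *		return rc;
 *	}
 */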

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
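
/*
 * Note that freed pages go back onto the local free list above rather
 * than to the page allocator; the backing memory stays reserved for
 * future xen_alloc_unpopulated_pages() calls.
 */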

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			pg->zone_device_data = page_list;
			page_list = pg;
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif