// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
10 #include <linux/slab.h>
11 #include <linux/export.h>
12 #include <linux/balloon_compaction.h>
15 * balloon_page_alloc - allocates a new page for insertion into the balloon
18 * Driver must call it to properly allocate a new enlisted balloon page.
19 * Driver must call balloon_page_enqueue before definitively removing it from
20 * the guest system. This function returns the page address for the recently
21 * allocated page or NULL in the case we fail to allocate a new page this turn.
23 struct page
*balloon_page_alloc(void)
25 struct page
*page
= alloc_page(balloon_mapping_gfp_mask() |
26 __GFP_NOMEMALLOC
| __GFP_NORETRY
);
29 EXPORT_SYMBOL_GPL(balloon_page_alloc
);
32 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
34 * @b_dev_info: balloon device descriptor where we will insert a new page to
35 * @page: new page to enqueue - allocated using balloon_page_alloc.
37 * Driver must call it to properly enqueue a new allocated balloon page
38 * before definitively removing it from the guest system.
39 * This function returns the page address for the recently enqueued page or
40 * NULL in the case we fail to allocate a new page this turn.
42 void balloon_page_enqueue(struct balloon_dev_info
*b_dev_info
,
48 * Block others from accessing the 'page' when we get around to
49 * establishing additional references. We should be the only one
50 * holding a reference to the 'page' at this point.
52 BUG_ON(!trylock_page(page
));
53 spin_lock_irqsave(&b_dev_info
->pages_lock
, flags
);
54 balloon_page_insert(b_dev_info
, page
);
55 __count_vm_event(BALLOON_INFLATE
);
56 spin_unlock_irqrestore(&b_dev_info
->pages_lock
, flags
);
59 EXPORT_SYMBOL_GPL(balloon_page_enqueue
);
62 * balloon_page_dequeue - removes a page from balloon's page list and returns
63 * the its address to allow the driver release the page.
64 * @b_dev_info: balloon device decriptor where we will grab a page from.
66 * Driver must call it to properly de-allocate a previous enlisted balloon page
67 * before definetively releasing it back to the guest system.
68 * This function returns the page address for the recently dequeued page or
69 * NULL in the case we find balloon's page list temporarily empty due to
70 * compaction isolated pages.
72 struct page
*balloon_page_dequeue(struct balloon_dev_info
*b_dev_info
)
74 struct page
*page
, *tmp
;
78 dequeued_page
= false;
79 spin_lock_irqsave(&b_dev_info
->pages_lock
, flags
);
80 list_for_each_entry_safe(page
, tmp
, &b_dev_info
->pages
, lru
) {
82 * Block others from accessing the 'page' while we get around
83 * establishing additional references and preparing the 'page'
84 * to be released by the balloon driver.
86 if (trylock_page(page
)) {
87 #ifdef CONFIG_BALLOON_COMPACTION
88 if (PageIsolated(page
)) {
89 /* raced with isolation */
94 balloon_page_delete(page
);
95 __count_vm_event(BALLOON_DEFLATE
);
101 spin_unlock_irqrestore(&b_dev_info
->pages_lock
, flags
);
103 if (!dequeued_page
) {
105 * If we are unable to dequeue a balloon page because the page
106 * list is empty and there is no isolated pages, then something
107 * went out of track and some balloon pages are lost.
108 * BUG() here, otherwise the balloon driver may get stuck into
109 * an infinite loop while attempting to release all its pages.
111 spin_lock_irqsave(&b_dev_info
->pages_lock
, flags
);
112 if (unlikely(list_empty(&b_dev_info
->pages
) &&
113 !b_dev_info
->isolated_pages
))
115 spin_unlock_irqrestore(&b_dev_info
->pages_lock
, flags
);
120 EXPORT_SYMBOL_GPL(balloon_page_dequeue
);
122 #ifdef CONFIG_BALLOON_COMPACTION
124 bool balloon_page_isolate(struct page
*page
, isolate_mode_t mode
)
127 struct balloon_dev_info
*b_dev_info
= balloon_page_device(page
);
130 spin_lock_irqsave(&b_dev_info
->pages_lock
, flags
);
131 list_del(&page
->lru
);
132 b_dev_info
->isolated_pages
++;
133 spin_unlock_irqrestore(&b_dev_info
->pages_lock
, flags
);
138 void balloon_page_putback(struct page
*page
)
140 struct balloon_dev_info
*b_dev_info
= balloon_page_device(page
);
143 spin_lock_irqsave(&b_dev_info
->pages_lock
, flags
);
144 list_add(&page
->lru
, &b_dev_info
->pages
);
145 b_dev_info
->isolated_pages
--;
146 spin_unlock_irqrestore(&b_dev_info
->pages_lock
, flags
);
150 /* move_to_new_page() counterpart for a ballooned page */
151 int balloon_page_migrate(struct address_space
*mapping
,
152 struct page
*newpage
, struct page
*page
,
153 enum migrate_mode mode
)
155 struct balloon_dev_info
*balloon
= balloon_page_device(page
);
158 * We can not easily support the no copy case here so ignore it as it
159 * is unlikely to be use with ballon pages. See include/linux/hmm.h for
160 * user of the MIGRATE_SYNC_NO_COPY mode.
162 if (mode
== MIGRATE_SYNC_NO_COPY
)
165 VM_BUG_ON_PAGE(!PageLocked(page
), page
);
166 VM_BUG_ON_PAGE(!PageLocked(newpage
), newpage
);
168 return balloon
->migratepage(balloon
, newpage
, page
, mode
);
171 const struct address_space_operations balloon_aops
= {
172 .migratepage
= balloon_page_migrate
,
173 .isolate_page
= balloon_page_isolate
,
174 .putback_page
= balloon_page_putback
,
176 EXPORT_SYMBOL_GPL(balloon_aops
);
178 #endif /* CONFIG_BALLOON_COMPACTION */