// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_VERSION("1.5.0.0-k");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
 * __GFP_NOWARN, to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)
/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep. This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)
/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16
/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */
enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
};
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
#define VMW_BALLOON_2M_SHIFT		(9)
#define VMW_BALLOON_NUM_PAGE_SIZES	(2)
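
/*
 * Note: with VMW_BALLOON_2M_SHIFT of 9, a single 2 MB huge page covers
 * 1 << 9 = 512 small (4 KB) pages; balloon sizes and targets throughout
 * this driver are therefore counted in units of small pages.
 */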
/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available,
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands,
 * VMW_BALLOON_BATCHED_2M_CMDS:
 *	BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
 * VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
 *	VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
 */
#define VMW_BALLOON_CMD_START			0
#define VMW_BALLOON_CMD_GET_TARGET		1
#define VMW_BALLOON_CMD_LOCK			2
#define VMW_BALLOON_CMD_UNLOCK			3
#define VMW_BALLOON_CMD_GUEST_ID		4
#define VMW_BALLOON_CMD_BATCHED_LOCK		6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK		7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK		8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK	9
#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET	10
/* error codes */
#define VMW_BALLOON_SUCCESS			0
#define VMW_BALLOON_FAILURE			-1
#define VMW_BALLOON_ERROR_CMD_INVALID		1
#define VMW_BALLOON_ERROR_PPN_INVALID		2
#define VMW_BALLOON_ERROR_PPN_LOCKED		3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED		4
#define VMW_BALLOON_ERROR_PPN_PINNED		5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED		6
#define VMW_BALLOON_ERROR_RESET			7
#define VMW_BALLOON_ERROR_BUSY			8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)
/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64  PAGE_SHIFT          6         0
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
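
/*
 * Worked example (assuming 4 KB pages, PAGE_SHIFT == 12): a batch entry
 * of 0xabcde005 decodes as page number 0xabcde (entry >> PAGE_SHIFT) with
 * status 5, i.e. VMW_BALLOON_ERROR_PPN_PINNED
 * (entry & VMW_BALLOON_BATCH_STATUS_MASK).
 */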
struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};
static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
}
static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				      int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}
static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				   u64 pa)
{
	batch->pages[idx] = pa;
}
#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"=a"(__status),					\
		"=c"(__dummy1),					\
		"=d"(__dummy2),					\
		"=b"(result),					\
		"=S"(__dummy3) :				\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
		"3"(arg1),					\
		"4"(arg2) :					\
		"memory");					\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
		result = __dummy1;				\
	result &= -1UL;						\
	__status & -1UL;					\
})
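
/*
 * In the hypercall above, the magic number in EAX and port 0x5670 in DX
 * identify the balloon backdoor; ECX carries the command, EBX and ESI its
 * two arguments. The "inl" traps to the hypervisor, which returns the
 * status in EAX and, for the START command, the capability set in ECX.
 */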
#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;
	unsigned int doorbell;

	/* allocation statistics */
	unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
	unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];

	/* monitor operations */
	unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
	unsigned int doorbell_set;
	unsigned int doorbell_unset;
};
#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif
struct vmballoon;

struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
		    bool is_2m_pages, unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
		      bool is_2m_pages, unsigned int *target);
};
struct vmballoon_page_size {
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;
};
struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
	unsigned supported_page_sizes;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	unsigned long capabilities;

	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;

	struct vmci_handle vmci_doorbell;
};
static struct vmballoon balloon;
/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities, dummy = 0;
	bool success;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		success = true;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		success = true;
		break;
	default:
		success = false;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->supported_page_sizes = 2;
	else
		b->supported_page_sizes = 1;

	if (!success) {
		pr_debug("%s - failed, hv returns %ld\n", __func__, status);
		STATS_INC(b->stats.start_fail);
	}
	return success;
}
static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	switch (status) {
	case VMW_BALLOON_SUCCESS:
		return true;

	case VMW_BALLOON_ERROR_RESET:
		b->reset_required = true;
		/* fall through */

	default:
		return false;
	}
}
/*
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy = 0;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
				    dummy);

	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}
static u16 vmballoon_page_size(bool is_2m_page)
{
	if (is_2m_page)
		return 1 << VMW_BALLOON_2M_SHIFT;

	return 1;
}
/*
 * Retrieve desired balloon size from the host.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	unsigned long dummy = 0;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	/* update stats */
	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}
/*
 * Notify the host about allocated page so that host can use it without
 * fear that guest will need it. Host may reject some pages, we need to
 * check the return value and maybe submit a different page.
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				    unsigned int *hv_status,
				    unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -EINVAL;

	STATS_INC(b->stats.lock[false]);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[false]);
	return -EIO;
}
static int vmballoon_send_batched_lock(struct vmballoon *b,
				       unsigned int num_pages,
				       bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));

	STATS_INC(b->stats.lock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
					    *target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
					    *target);

	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn,
		 status);
	STATS_INC(b->stats.lock_fail[is_2m_pages]);
	return 1;
}
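
/*
 * Note the asymmetric return conventions: vmballoon_send_lock_page()
 * returns -EIO on failure, while the batched variant above returns 1,
 * letting vmballoon_lock_batched_page() below tell a failed hypervisor
 * call (> 0) apart from per-page status errors.
 */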
/*
 * Notify the host that guest intends to release given page back into
 * the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
				       unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

	STATS_INC(b->stats.unlock[false]);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[false]);
	return false;
}
static bool vmballoon_send_batched_unlock(struct vmballoon *b,
					  unsigned int num_pages,
					  bool is_2m_pages,
					  unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));

	STATS_INC(b->stats.unlock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
					    *target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
					    *target);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn,
		 status);
	STATS_INC(b->stats.unlock_fail[is_2m_pages]);
	return false;
}
static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
{
	if (is_2m_page)
		return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);

	return alloc_page(flags);
}
static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
	if (is_2m_page)
		__free_pages(page, VMW_BALLOON_2M_SHIFT);
	else
		__free_page(page);
}
/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when host decides to "reset" balloon for one reason or another.
 * Unlike normal "deflate" we do not (shall not) notify host of the pages
 * being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned is_2m_pages;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
		u16 size_per_page = vmballoon_page_size(is_2m_pages);

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			list_del(&page->lru);
			vmballoon_free_page(page, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
			b->size -= size_per_page;
			cond_resched();
		}
	}

	/* Clearing the batch_page unconditionally has no adverse effect */
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
}
/*
 * Notify the host of a ballooned page. If host rejects the page put it on the
 * refuse list, those refused page are then released at the end of the
 * inflation cycle.
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
			       bool is_2m_pages, unsigned int *target)
{
	int locked, hv_status;
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
					  target);
	if (locked) {
		STATS_INC(b->stats.refused_alloc[false]);

		if (locked == -EIO &&
		    (hv_status == VMW_BALLOON_ERROR_RESET ||
		     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
			vmballoon_free_page(page, false);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			page_size->n_refused_pages++;
			list_add(&page->lru, &page_size->refused_pages);
		} else {
			vmballoon_free_page(page, false);
		}
		return locked;
	}

	/* track allocated page */
	list_add(&page->lru, &page_size->pages);

	/* update balloon size */
	b->size++;

	return 0;
}
static int vmballoon_lock_batched_page(struct vmballoon *b,
				       unsigned int num_pages,
				       bool is_2m_pages, unsigned int *target)
{
	int locked, i;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
					     target);
	if (locked > 0) {
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

			vmballoon_free_page(p, is_2m_pages);
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &page_size->pages);
			b->size += size_per_page;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (page_size->n_refused_pages
					< VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &page_size->refused_pages);
				page_size->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			vmballoon_free_page(p, is_2m_pages);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}
/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
				 bool is_2m_pages, unsigned int *target)
{
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
		list_add(&page->lru, &page_size->pages);
		return -EIO;
	}

	/* deallocate page */
	vmballoon_free_page(page, false);
	STATS_INC(b->stats.free[false]);

	/* update balloon size */
	b->size--;

	return 0;
}
static int vmballoon_unlock_batched_page(struct vmballoon *b,
					 unsigned int num_pages,
					 bool is_2m_pages,
					 unsigned int *target)
{
	int locked, i, ret = 0;
	bool hv_success;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
						   target);
	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &page_size->pages);
		} else {
			/* deallocate page */
			vmballoon_free_page(p, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);

			/* update balloon size */
			b->size -= size_per_page;
		}
	}

	return ret;
}
/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    bool is_2m_pages)
{
	struct page *page, *next;
	struct vmballoon_page_size *page_size =
			&b->page_sizes[is_2m_pages];

	list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
		list_del(&page->lru);
		vmballoon_free_page(page, is_2m_pages);
		STATS_INC(b->stats.refused_free[is_2m_pages]);
	}

	page_size->n_refused_pages = 0;
}
static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}
static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				       struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
			       (u64)page_to_pfn(p) << PAGE_SHIFT);
}
/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	unsigned int num_pages = 0;
	int error = 0;
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
	bool is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, balloon driver can consume
	 * all available CPU cycles if too many pages are allocated in a
	 * second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slowdown page allocations considerably.
	 */

	/*
	 * Start with no sleep allocation rate which may be higher
	 * than sleeping allocation rate.
	 */
	is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;

	pr_debug("%s - goal: %d", __func__, b->target - b->size);

	while (!b->reset_required &&
		b->size + num_pages * vmballoon_page_size(is_2m_pages)
		< b->target) {
		struct page *page;

		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
			STATS_INC(b->stats.alloc[is_2m_pages]);
		else
			STATS_INC(b->stats.sleep_alloc);

		page = vmballoon_alloc_page(flags, is_2m_pages);
		if (!page) {
			STATS_INC(b->stats.alloc_fail[is_2m_pages]);

			if (is_2m_pages) {
				b->ops->lock(b, num_pages, true, &b->target);

				/*
				 * ignore errors from locking as we now switch
				 * to 4k pages and we might get different
				 * errors.
				 */

				num_pages = 0;
				is_2m_pages = false;
				continue;
			}

			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. We just log
				 * the event, but do not stop the inflation
				 * due to its negative impact on performance.
				 */
				STATS_INC(b->stats.sleep_alloc_fail);
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Slowing down page allocations
			 * seems to be reasonable, but doing so might actually
			 * cause the hypervisor to throttle us down, resulting
			 * in degraded performance. We will count on the
			 * scheduler and standard memory management mechanisms
			 * for now.
			 */
			flags = VMW_PAGE_ALLOC_CANSLEEP;
			continue;
		}

		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
			error = b->ops->lock(b, num_pages, is_2m_pages,
					     &b->target);
			num_pages = 0;
			if (error)
				break;
		}

		cond_resched();
	}

	if (num_pages > 0)
		b->ops->lock(b, num_pages, is_2m_pages, &b->target);

	vmballoon_release_refused_pages(b, true);
	vmballoon_release_refused_pages(b, false);
}
/*
 * Decrease the size of the balloon allowing guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	unsigned is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* free pages to reach target */
	for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
			is_2m_pages++) {
		struct page *page, *next;
		unsigned int num_pages = 0;
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			if (b->reset_required ||
				(b->target > 0 &&
					b->size - num_pages
					* vmballoon_page_size(is_2m_pages)
				< b->target + vmballoon_page_size(true)))
				break;

			list_del(&page->lru);
			b->ops->add_page(b, num_pages++, page);

			if (num_pages == b->batch_max_pages) {
				int error;

				error = b->ops->unlock(b, num_pages,
						       is_2m_pages,
						       &b->target);
				num_pages = 0;
				if (error)
					return;
			}

			cond_resched();
		}

		if (num_pages > 0)
			b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
	}
}
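
/*
 * The vmballoon_page_size(true) slack in the loop above gives deflation a
 * hysteresis band of one huge page (512 small pages), so the driver does
 * not ping-pong pages when the host target hovers near the current
 * balloon size.
 */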
static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};
static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};
static bool vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return false;

	b->batch_page = page_address(page);
	return true;
}
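
/*
 * With 4 KB pages, the batch page holds PAGE_SIZE / sizeof(u64) = 512
 * entries, so one backdoor call can lock or unlock up to 512 pages (or
 * 512 huge pages in 2M mode) instead of one page per hypercall.
 */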
/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	STATS_INC(b->stats.doorbell);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}
/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	int error;

	VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
			   VMCI_INVALID_ID, error);
	STATS_INC(b->stats.doorbell_unset);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}
/*
 * Initialize vmci doorbell, to get notified as soon as balloon changes
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error, dummy;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
				   b->vmci_doorbell.resource, dummy);

	STATS_INC(b->stats.doorbell_set);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}
/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}
/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		if (b->size < target)
			vmballoon_inflate(b);
		else if (target == 0 ||
				b->size > target + vmballoon_page_size(true))
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}
/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format capabilities info */
	seq_printf(f,
		   "balloon capabilities:   %#4x\n"
		   "used capabilities:      %#4lx\n"
		   "is resetting:           %c\n",
		   VMW_BALLOON_CAPABILITIES, b->capabilities,
		   b->reset_required ? 'y' : 'n');

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "doorbell:           %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "2m-lock:            %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "2m-unlock:          %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "prim2mAlloc:        %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "prim2mFree:         %8u\n"
		   "primFree:           %8u\n"
		   "err2mAlloc:         %8u\n"
		   "errAlloc:           %8u\n"
		   "err2mFree:          %8u\n"
		   "errFree:            %8u\n"
		   "doorbellSet:        %8u\n"
		   "doorbellUnset:      %8u\n",
		   stats->timer,
		   stats->doorbell,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock[true], stats->lock_fail[true],
		   stats->lock[false], stats->lock_fail[false],
		   stats->unlock[true], stats->unlock_fail[true],
		   stats->unlock[false], stats->unlock_fail[false],
		   stats->target, stats->target_fail,
		   stats->alloc[true], stats->alloc_fail[true],
		   stats->alloc[false], stats->alloc_fail[false],
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free[true],
		   stats->free[false],
		   stats->refused_alloc[true], stats->refused_alloc[false],
		   stats->refused_free[true], stats->refused_free[false],
		   stats->doorbell_set, stats->doorbell_unset);

	return 0;
}
static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}
static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR(b->dbg_entry)) {
		error = PTR_ERR(b->dbg_entry);
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else
static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */
static int __init vmballoon_init(void)
{
	int error;
	unsigned is_2m_pages;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
	}

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);