// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 * Copyright 2008 Rusty Russell IBM Corporation
 */

#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/page_reporting.h>

/*
 * Balloon device works in 4K page units.  So each page is pointed to by
 * multiple balloon pages.  All memory counters in this driver are in balloon
 * page units.
 */
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
/* Maximum number of (4k) pages to deflate on OOM notifications. */
#define VIRTIO_BALLOON_OOM_NR_PAGES 256
#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80

#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
					     __GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
#define VIRTIO_BALLOON_HINT_BLOCK_ORDER MAX_PAGE_ORDER
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_HINT_BLOCK_BYTES \
	(1 << (VIRTIO_BALLOON_HINT_BLOCK_ORDER + PAGE_SHIFT))
#define VIRTIO_BALLOON_HINT_BLOCK_PAGES (1 << VIRTIO_BALLOON_HINT_BLOCK_ORDER)

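/*
 * Example of the unit conversion above: with a 4 KiB kernel PAGE_SIZE,
 * VIRTIO_BALLOON_PAGES_PER_PAGE is 1, so one kernel page maps to exactly one
 * balloon page; with a 64 KiB PAGE_SIZE it is 16, and set_page_pfns() below
 * reports 16 consecutive balloon pfns for each kernel page.
 */
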
enum virtio_balloon_vq {
	VIRTIO_BALLOON_VQ_INFLATE,
	VIRTIO_BALLOON_VQ_DEFLATE,
	VIRTIO_BALLOON_VQ_STATS,
	VIRTIO_BALLOON_VQ_FREE_PAGE,
	VIRTIO_BALLOON_VQ_REPORTING,
	VIRTIO_BALLOON_VQ_MAX
};

enum virtio_balloon_config_read {
	VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
};

struct virtio_balloon {
	struct virtio_device *vdev;
	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;

	/* Balloon's own wq for cpu-intensive work items */
	struct workqueue_struct *balloon_wq;
	/* The free page reporting work item submitted to the balloon wq */
	struct work_struct report_free_page_work;

	/* The balloon servicing is delegated to a freezable workqueue. */
	struct work_struct update_balloon_stats_work;
	struct work_struct update_balloon_size_work;

	/* Prevent updating balloon when it is being canceled. */
	spinlock_t stop_update_lock;
	bool stop_update;
	/* Bitmap to indicate if reading the related config fields is needed */
	unsigned long config_read_bitmap;

	/* The list of allocated free pages, waiting to be given back to mm */
	struct list_head free_page_list;
	spinlock_t free_page_list_lock;
	/* The number of free page blocks on the above list */
	unsigned long num_free_page_blocks;
	/*
	 * The cmd id received from host.
	 * Read it via virtio_balloon_cmd_id_received to get the latest value
	 * sent from host.
	 */
	u32 cmd_id_received_cache;
	/* The cmd id that is actively in use */
	__virtio32 cmd_id_active;
	/* Buffer to store the stop sign */
	__virtio32 cmd_id_stop;

	/* Waiting for host to ack the pages we released. */
	wait_queue_head_t acked;

	/* Number of balloon pages we've told the Host we're not using. */
	unsigned int num_pages;
	/*
	 * The pages we've told the Host we're not using are enqueued
	 * at vb_dev_info->pages list.
	 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
	 * to num_pages above.
	 */
	struct balloon_dev_info vb_dev_info;

	/* Synchronize access/update to this struct virtio_balloon elements */
	struct mutex balloon_lock;

	/* The array of pfns we tell the Host about. */
	unsigned int num_pfns;
	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

	/* Memory statistics */
	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];

	/* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
	struct shrinker *shrinker;

	/* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
	struct notifier_block oom_nb;

	/* Free page reporting device */
	struct virtqueue *reporting_vq;
	struct page_reporting_dev_info pr_dev_info;

	/* State for keeping the wakeup_source active while adjusting the balloon */
	spinlock_t wakeup_lock;
	bool processing_wakeup_event;
	u32 wakeup_signal_mask;
};

#define VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST (1 << 0)
#define VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS (1 << 1)

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static u32 page_to_balloon_pfn(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
	/* Convert pfn from Linux page size to balloon page size. */
	return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}

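/*
 * The three helpers below keep the device's wakeup source held for as long as
 * any balloon work (size adjustment or stats refill) triggered by a wakeup
 * event is still pending, so a system suspend cannot race with the deferred
 * work items.
 */
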
static void start_wakeup_event(struct virtio_balloon *vb, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&vb->wakeup_lock, flags);
	vb->wakeup_signal_mask |= mask;
	if (!vb->processing_wakeup_event) {
		vb->processing_wakeup_event = true;
		pm_stay_awake(&vb->vdev->dev);
	}
	spin_unlock_irqrestore(&vb->wakeup_lock, flags);
}

static void process_wakeup_event(struct virtio_balloon *vb, u32 mask)
{
	spin_lock_irq(&vb->wakeup_lock);
	vb->wakeup_signal_mask &= ~mask;
	spin_unlock_irq(&vb->wakeup_lock);
}

static void finish_wakeup_event(struct virtio_balloon *vb)
{
	spin_lock_irq(&vb->wakeup_lock);
	if (!vb->wakeup_signal_mask && vb->processing_wakeup_event) {
		vb->processing_wakeup_event = false;
		pm_relax(&vb->vdev->dev);
	}
	spin_unlock_irq(&vb->wakeup_lock);
}

static void balloon_ack(struct virtqueue *vq)
{
	struct virtio_balloon *vb = vq->vdev->priv;

	wake_up(&vb->acked);
}

static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
	struct scatterlist sg;
	unsigned int len;

	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

	/* We should always be able to add one buffer to an empty queue. */
	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
	virtqueue_kick(vq);

	/* When host has read buffer, this completes via balloon_ack */
	wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}

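/*
 * Note that tell_host() is fully synchronous: it blocks on vb->acked until
 * the host has consumed the pfn buffer, so its callers hold vb->balloon_lock
 * and may sleep.
 */
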
static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
					struct scatterlist *sg, unsigned int nents)
{
	struct virtio_balloon *vb =
		container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
	struct virtqueue *vq = vb->reporting_vq;
	unsigned int unused, err;

	/* We should always be able to add these buffers to an empty queue. */
	err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);

	/*
	 * In the extremely unlikely case that something has occurred and we
	 * are able to trigger an error we will simply display a warning
	 * and exit without actually processing the pages.
	 */
	if (WARN_ON_ONCE(err))
		return err;

	virtqueue_kick(vq);

	/* When host has read buffer, this completes via balloon_ack */
	wait_event(vb->acked, virtqueue_get_buf(vq, &unused));

	return 0;
}

static void set_page_pfns(struct virtio_balloon *vb,
			  __virtio32 pfns[], struct page *page)
{
	unsigned int i;

	BUILD_BUG_ON(VIRTIO_BALLOON_PAGES_PER_PAGE > VIRTIO_BALLOON_ARRAY_PFNS_MAX);

	/*
	 * Set balloon pfns pointing at this page.
	 * Note that the first pfn points at start of the page.
	 */
	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
		pfns[i] = cpu_to_virtio32(vb->vdev,
					  page_to_balloon_pfn(page) + i);
}

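/*
 * fill_balloon() below works in two phases: pages are first allocated outside
 * of balloon_lock (allocation may sleep or trigger reclaim), and only then
 * enqueued and reported to the host in one batch under the lock.
 */
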
static unsigned int fill_balloon(struct virtio_balloon *vb, size_t num)
{
	unsigned int num_allocated_pages;
	unsigned int num_pfns;
	struct page *page;
	LIST_HEAD(pages);

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	for (num_pfns = 0; num_pfns < num;
	     num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
		struct page *page = balloon_page_alloc();

		if (!page) {
			dev_info_ratelimited(&vb->vdev->dev,
					     "Out of puff! Can't get %u pages\n",
					     VIRTIO_BALLOON_PAGES_PER_PAGE);
			/* Sleep for at least 1/5 of a second before retry. */
			msleep(200);
			break;
		}

		balloon_page_push(&pages, page);
	}

	mutex_lock(&vb->balloon_lock);

	vb->num_pfns = 0;

	while ((page = balloon_page_pop(&pages))) {
		balloon_page_enqueue(&vb->vb_dev_info, page);

		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
		if (!virtio_has_feature(vb->vdev,
					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
			adjust_managed_page_count(page, -1);
		vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
	}

	num_allocated_pages = vb->num_pfns;
	/* Did we get any? */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->inflate_vq);
	mutex_unlock(&vb->balloon_lock);

	return num_allocated_pages;
}

static void release_pages_balloon(struct virtio_balloon *vb,
				  struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		if (!virtio_has_feature(vb->vdev,
					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
			adjust_managed_page_count(page, 1);
		list_del(&page->lru);
		put_page(page); /* balloon reference */
	}
}

static unsigned int leak_balloon(struct virtio_balloon *vb, size_t num)
{
	unsigned int num_freed_pages;
	struct page *page;
	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
	LIST_HEAD(pages);

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	mutex_lock(&vb->balloon_lock);
	/* We can't release more pages than taken */
	num = min(num, (size_t)vb->num_pages);
	for (vb->num_pfns = 0; vb->num_pfns < num;
	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
		page = balloon_page_dequeue(vb_dev_info);
		if (!page)
			break;
		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
		list_add(&page->lru, &pages);
		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
	}

	num_freed_pages = vb->num_pfns;
	/*
	 * Note that if
	 * virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
	 * is true, we *have* to do it in this order
	 */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->deflate_vq);
	release_pages_balloon(vb, &pages);
	mutex_unlock(&vb->balloon_lock);
	return num_freed_pages;
}

static inline void update_stat(struct virtio_balloon *vb, int idx,
			       u16 tag, u64 val)
{
	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
	vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
	vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

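/*
 * Stats reported through update_stat() mix units: counters wrapped in
 * pages_to_bytes() (e.g. VIRTIO_BALLOON_S_SWAP_IN, VIRTIO_BALLOON_S_MEMFREE)
 * are byte values, while fault and OOM-kill counters are raw event counts.
 */
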
#ifdef CONFIG_VM_EVENT_COUNTERS
/* Return the number of entries filled by vm events */
static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned int idx = 0;
	unsigned int zid;
	unsigned long stall = 0;

	all_vm_events(events);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
		    pages_to_bytes(events[PSWPIN]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
		    pages_to_bytes(events[PSWPOUT]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_OOM_KILL, events[OOM_KILL]);

	/* sum all the stall events */
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		stall += events[ALLOCSTALL_NORMAL - ZONE_NORMAL + zid];

	update_stat(vb, idx++, VIRTIO_BALLOON_S_ALLOC_STALL, stall);

	update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_SCAN,
		    pages_to_bytes(events[PGSCAN_KSWAPD]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_SCAN,
		    pages_to_bytes(events[PGSCAN_DIRECT]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_ASYNC_RECLAIM,
		    pages_to_bytes(events[PGSTEAL_KSWAPD]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_DIRECT_RECLAIM,
		    pages_to_bytes(events[PGSTEAL_DIRECT]));

#ifdef CONFIG_HUGETLB_PAGE
	update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
		    events[HTLB_BUDDY_PGALLOC]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
		    events[HTLB_BUDDY_PGALLOC_FAIL]);
#endif /* CONFIG_HUGETLB_PAGE */

	return idx;
}
#else /* CONFIG_VM_EVENT_COUNTERS */
static inline unsigned int update_balloon_vm_stats(struct virtio_balloon *vb)
{
	return 0;
}
#endif /* CONFIG_VM_EVENT_COUNTERS */

static unsigned int update_balloon_stats(struct virtio_balloon *vb)
{
	struct sysinfo i;
	unsigned int idx;
	long available;
	unsigned long caches;

	idx = update_balloon_vm_stats(vb);

	si_meminfo(&i);
	available = si_mem_available();
	caches = global_node_page_state(NR_FILE_PAGES);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
		    pages_to_bytes(i.freeram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
		    pages_to_bytes(i.totalram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
		    pages_to_bytes(available));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
		    pages_to_bytes(caches));

	return idx;
}

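/*
 * update_balloon_stats() returns the number of entries actually filled; its
 * callers use that count to size the scatterlist handed to the stats
 * virtqueue, so only valid entries are exposed to the host.
 */
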
/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse.  The driver initializes the virtqueue
 * with a single buffer.  From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
 * we delegate the job to a freezable workqueue that will do the actual work via
 * stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
	struct virtio_balloon *vb = vq->vdev->priv;

	spin_lock(&vb->stop_update_lock);
	if (!vb->stop_update) {
		start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
		queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
	}
	spin_unlock(&vb->stop_update_lock);
}

static void stats_handle_request(struct virtio_balloon *vb)
{
	struct virtqueue *vq;
	struct scatterlist sg;
	unsigned int len, num_stats;

	num_stats = update_balloon_stats(vb);

	vq = vb->stats_vq;
	if (!virtqueue_get_buf(vq, &len))
		return;
	sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
	virtqueue_kick(vq);
}

static inline s64 towards_target(struct virtio_balloon *vb)
{
	s64 target;
	u32 num_pages;

	/* Legacy balloon config space is LE, unlike all other devices. */
	virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
			&num_pages);

	/*
	 * Aligned up to guest page size to avoid inflating and deflating
	 * balloon endlessly.
	 */
	target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
	return target - vb->num_pages;
}

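/*
 * The sign of towards_target() drives update_balloon_size_func(): a positive
 * difference means the host wants more pages in the balloon (inflate), a
 * negative one means pages should be returned to the guest (deflate).
 */
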
/* Gives back @num_to_return blocks of free pages to mm. */
static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
					     unsigned long num_to_return)
{
	struct page *page;
	unsigned long num_returned;

	spin_lock_irq(&vb->free_page_list_lock);
	for (num_returned = 0; num_returned < num_to_return; num_returned++) {
		page = balloon_page_pop(&vb->free_page_list);
		if (!page)
			break;
		free_pages((unsigned long)page_address(page),
			   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	}
	vb->num_free_page_blocks -= num_returned;
	spin_unlock_irq(&vb->free_page_list_lock);

	return num_returned;
}

static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
{
	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		return;

	/* No need to queue the work if the bit was already set. */
	if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			     &vb->config_read_bitmap))
		return;

	queue_work(vb->balloon_wq, &vb->report_free_page_work);
}

static void start_update_balloon_size(struct virtio_balloon *vb)
{
	start_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);
	queue_work(system_freezable_wq, &vb->update_balloon_size_work);
}

static void virtballoon_changed(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&vb->stop_update_lock, flags);
	if (!vb->stop_update) {
		start_update_balloon_size(vb);
		virtio_balloon_queue_free_page_work(vb);
	}
	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}

static void update_balloon_size(struct virtio_balloon *vb)
{
	u32 actual = vb->num_pages;

	/* Legacy balloon config space is LE, unlike all other devices. */
	virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual,
			 &actual);
}

static void update_balloon_stats_func(struct work_struct *work)
{
	struct virtio_balloon *vb;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_stats_work);

	process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_STATS);
	stats_handle_request(vb);
	finish_wakeup_event(vb);
}

static void update_balloon_size_func(struct work_struct *work)
{
	struct virtio_balloon *vb;
	s64 diff;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_size_work);

	process_wakeup_event(vb, VIRTIO_BALLOON_WAKEUP_SIGNAL_ADJUST);

	diff = towards_target(vb);

	if (diff) {
		if (diff > 0)
			diff -= fill_balloon(vb, diff);
		else
			diff += leak_balloon(vb, -diff);
		update_balloon_size(vb);
	}

	if (diff)
		queue_work(system_freezable_wq, work);
	else
		finish_wakeup_event(vb);
}

static int init_vqs(struct virtio_balloon *vb)
{
	struct virtqueue_info vqs_info[VIRTIO_BALLOON_VQ_MAX] = {};
	struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
	int err;

	/*
	 * Inflateq and deflateq are used unconditionally. The names[]
	 * will be NULL if the related feature is not enabled, which will
	 * cause no allocation for the corresponding virtqueue in find_vqs.
	 */
	vqs_info[VIRTIO_BALLOON_VQ_INFLATE].callback = balloon_ack;
	vqs_info[VIRTIO_BALLOON_VQ_INFLATE].name = "inflate";
	vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].callback = balloon_ack;
	vqs_info[VIRTIO_BALLOON_VQ_DEFLATE].name = "deflate";

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		vqs_info[VIRTIO_BALLOON_VQ_STATS].name = "stats";
		vqs_info[VIRTIO_BALLOON_VQ_STATS].callback = stats_request;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		vqs_info[VIRTIO_BALLOON_VQ_FREE_PAGE].name = "free_page_vq";

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
		vqs_info[VIRTIO_BALLOON_VQ_REPORTING].name = "reporting_vq";
		vqs_info[VIRTIO_BALLOON_VQ_REPORTING].callback = balloon_ack;
	}

	err = virtio_find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs,
			      vqs_info, NULL);
	if (err)
		return err;

	vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
	vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		struct scatterlist sg;
		unsigned int num_stats;
		vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];

		/*
		 * Prime this virtqueue with one buffer so the hypervisor can
		 * use it to signal us later (it can't be broken yet!).
		 */
		num_stats = update_balloon_stats(vb);

		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
		err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
					   GFP_KERNEL);
		if (err) {
			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
				 __func__);
			return err;
		}
		virtqueue_kick(vb->stats_vq);
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
		vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];

	return 0;
}

static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
	if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			       &vb->config_read_bitmap)) {
		/* Legacy balloon config space is LE, unlike all other devices. */
		virtio_cread_le(vb->vdev, struct virtio_balloon_config,
				free_page_hint_cmd_id,
				&vb->cmd_id_received_cache);
	}

	return vb->cmd_id_received_cache;
}

*vb
)
661 struct scatterlist sg
;
662 struct virtqueue
*vq
= vb
->free_page_vq
;
665 /* Detach all the used buffers from the vq */
666 while (virtqueue_get_buf(vq
, &unused
))
669 vb
->cmd_id_active
= cpu_to_virtio32(vb
->vdev
,
670 virtio_balloon_cmd_id_received(vb
));
671 sg_init_one(&sg
, &vb
->cmd_id_active
, sizeof(vb
->cmd_id_active
));
672 err
= virtqueue_add_outbuf(vq
, &sg
, 1, &vb
->cmd_id_active
, GFP_KERNEL
);
static int send_cmd_id_stop(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

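/*
 * Free page hinting protocol, as implemented below: the driver first sends
 * the host's current cmd id with an outbuf (send_cmd_id_start), then streams
 * VIRTIO_BALLOON_HINT_BLOCK_ORDER-sized blocks of allocated free pages as
 * inbufs until the allocator runs dry or the host posts a new/stop cmd id,
 * and finally sends the stop sign (send_cmd_id_stop). The allocated blocks
 * stay on vb->free_page_list so they can be handed back to the mm later.
 */
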
static int get_free_page_and_send(struct virtio_balloon *vb)
{
	struct virtqueue *vq = vb->free_page_vq;
	struct page *page;
	struct scatterlist sg;
	int err, unused;
	void *p;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
			   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	/*
	 * When the allocation returns NULL, it indicates that we have got all
	 * the possible free pages, so return -EINTR to stop.
	 */
	if (!page)
		return -EINTR;

	p = page_address(page);
	sg_init_one(&sg, p, VIRTIO_BALLOON_HINT_BLOCK_BYTES);
	/* There is always 1 entry reserved for the cmd id to use. */
	if (vq->num_free > 1) {
		err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
		if (unlikely(err)) {
			free_pages((unsigned long)p,
				   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
			return err;
		}
		virtqueue_kick(vq);
		spin_lock_irq(&vb->free_page_list_lock);
		balloon_page_push(&vb->free_page_list, page);
		vb->num_free_page_blocks++;
		spin_unlock_irq(&vb->free_page_list_lock);
	} else {
		/*
		 * The vq has no available entry to add this page block, so
		 * just free it.
		 */
		free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
	}

	return 0;
}

static int send_free_pages(struct virtio_balloon *vb)
{
	int err;
	u32 cmd_id_active;

	while (1) {
		/*
		 * If a stop id or a new cmd id was just received from host,
		 * stop the reporting.
		 */
		cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
		if (unlikely(cmd_id_active !=
			     virtio_balloon_cmd_id_received(vb)))
			break;

		/*
		 * The free page blocks are allocated and sent to host one by
		 * one.
		 */
		err = get_free_page_and_send(vb);
		if (err == -EINTR)
			break;
		else if (unlikely(err))
			return err;
	}

	return 0;
}

static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
	int err;
	struct device *dev = &vb->vdev->dev;

	/* Start by sending the received cmd id to host with an outbuf. */
	err = send_cmd_id_start(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a start id, err = %d\n", err);

	err = send_free_pages(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a free page, err = %d\n", err);

	/* End by sending a stop id to host with an outbuf. */
	err = send_cmd_id_stop(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}

static void report_free_page_func(struct work_struct *work)
{
	struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
						 report_free_page_work);
	u32 cmd_id_received;

	cmd_id_received = virtio_balloon_cmd_id_received(vb);
	if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
		/* Pass ULONG_MAX to give back all the free pages */
		return_free_pages_to_mm(vb, ULONG_MAX);
	} else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
		   cmd_id_received !=
		   virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
		virtio_balloon_report_free_page(vb);
	}
}

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *			     a compaction thread.     (called under page lock)
 * @vb_dev_info: the balloon device
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page   : the isolated (old) page that is about to be migrated to newpage.
 * @mode   : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through movable_operations->migrate_page
 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct virtio_balloon *vb = container_of(vb_dev_info,
			struct virtio_balloon, vb_dev_info);
	unsigned long flags;

	/*
	 * In order to avoid lock contention while migrating pages concurrently
	 * to leak_balloon() or fill_balloon() we just give up the balloon_lock
	 * this turn, as it is easier to retry the page migration later.
	 * This also prevents fill_balloon() getting stuck into a mutex
	 * recursion in the case it ends up triggering memory compaction
	 * while it is attempting to inflate the balloon.
	 */
	if (!mutex_trylock(&vb->balloon_lock))
		return -EAGAIN;

	get_page(newpage); /* balloon reference */

	/*
	 * When we migrate a page to a different zone and adjusted the
	 * managed page count when inflating, we have to fixup the count of
	 * both involved zones.
	 */
	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM) &&
	    page_zone(page) != page_zone(newpage)) {
		adjust_managed_page_count(page, 1);
		adjust_managed_page_count(newpage, -1);
	}

	/* balloon's page migration 1st step  -- inflate "newpage" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_insert(vb_dev_info, newpage);
	vb_dev_info->isolated_pages--;
	__count_vm_event(BALLOON_MIGRATE);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, newpage);
	tell_host(vb, vb->inflate_vq);

	/* balloon's page migration 2nd step -- deflate "page" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_delete(page);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, page);
	tell_host(vb, vb->deflate_vq);

	mutex_unlock(&vb->balloon_lock);

	put_page(page); /* balloon reference */

	return MIGRATEPAGE_SUCCESS;
}
#endif /* CONFIG_BALLOON_COMPACTION */

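/*
 * The shrinker below only covers page blocks sitting on vb->free_page_list
 * from free page hinting; under memory pressure those blocks are simply given
 * back to the mm, since the host allows reusing hinted pages at any time.
 */
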
static unsigned long shrink_free_pages(struct virtio_balloon *vb,
				       unsigned long pages_to_free)
{
	unsigned long blocks_to_free, blocks_freed;

	pages_to_free = round_up(pages_to_free,
				 VIRTIO_BALLOON_HINT_BLOCK_PAGES);
	blocks_to_free = pages_to_free / VIRTIO_BALLOON_HINT_BLOCK_PAGES;
	blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);

	return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
						  struct shrink_control *sc)
{
	struct virtio_balloon *vb = shrinker->private_data;

	return shrink_free_pages(vb, sc->nr_to_scan);
}

static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	struct virtio_balloon *vb = shrinker->private_data;

	return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}

static int virtio_balloon_oom_notify(struct notifier_block *nb,
				     unsigned long dummy, void *parm)
{
	struct virtio_balloon *vb = container_of(nb,
						 struct virtio_balloon, oom_nb);
	unsigned long *freed = parm;

	*freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
		  VIRTIO_BALLOON_PAGES_PER_PAGE;
	update_balloon_size(vb);

	return NOTIFY_OK;
}

static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
{
	shrinker_free(vb->shrinker);
}

static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
{
	vb->shrinker = shrinker_alloc(0, "virtio-balloon");
	if (!vb->shrinker)
		return -ENOMEM;

	vb->shrinker->scan_objects = virtio_balloon_shrinker_scan;
	vb->shrinker->count_objects = virtio_balloon_shrinker_count;
	vb->shrinker->private_data = vb;

	shrinker_register(vb->shrinker);

	return 0;
}

static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

#ifdef CONFIG_BALLOON_COMPACTION
	vb->vb_dev_info.migratepage = virtballoon_migratepage;
#endif
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		/*
		 * There is always one entry reserved for cmd id, so the ring
		 * size needs to be at least two to report free page hints.
		 */
		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
			err = -ENOSPC;
			goto out_del_vqs;
		}
		vb->balloon_wq = alloc_workqueue("balloon-wq",
					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
		if (!vb->balloon_wq) {
			err = -ENOMEM;
			goto out_del_vqs;
		}
		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
		vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
		vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		spin_lock_init(&vb->free_page_list_lock);
		INIT_LIST_HEAD(&vb->free_page_list);
		/*
		 * We're allowed to reuse any free pages, even if they are
		 * still to be processed by the host.
		 */
		err = virtio_balloon_register_shrinker(vb);
		if (err)
			goto out_del_balloon_wq;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
		vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
		vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
		err = register_oom_notifier(&vb->oom_nb);
		if (err < 0)
			goto out_unregister_shrinker;
	}

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
		/* Start with poison val of 0 representing general init */
		__u32 poison_val = 0;

		/*
		 * Let the hypervisor know that we are expecting a
		 * specific value to be written back in balloon pages.
		 *
		 * If the PAGE_POISON value was larger than a byte we would
		 * need to byte swap poison_val here to guarantee it is
		 * little-endian. However for now it is a single byte so we
		 * can pass it as-is.
		 */
		if (!want_init_on_free())
			memset(&poison_val, PAGE_POISON, sizeof(poison_val));

		virtio_cwrite_le(vb->vdev, struct virtio_balloon_config,
				 poison_val, &poison_val);
	}

	vb->pr_dev_info.report = virtballoon_free_page_report;
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
		unsigned int capacity;

		capacity = virtqueue_get_vring_size(vb->reporting_vq);
		if (capacity < PAGE_REPORTING_CAPACITY) {
			err = -ENOSPC;
			goto out_unregister_oom;
		}

		/*
		 * The default page reporting order is @pageblock_order, which
		 * corresponds to 512MB in size on ARM64 when 64KB base page
		 * size is used. The page reporting won't be triggered if the
		 * freeing page can't come up with a free area like that huge.
		 * So we specify the page reporting order to 5, corresponding
		 * to 2MB. It helps to avoid THP splitting if 4KB base page
		 * size is used by host.
		 *
		 * Ideally, the page reporting order is selected based on the
		 * host's base page size. However, it needs more work to report
		 * that value. The hard-coded order would be fine currently.
		 */
#if defined(CONFIG_ARM64) && defined(CONFIG_ARM64_64K_PAGES)
		vb->pr_dev_info.order = 5;
#endif

		err = page_reporting_register(&vb->pr_dev_info);
		if (err)
			goto out_unregister_oom;
	}

	spin_lock_init(&vb->wakeup_lock);

	/*
	 * The virtio balloon itself can't wake up the device, but it is
	 * responsible for processing wakeup events passed up from the transport
	 * layer. Wakeup sources don't support nesting/chaining calls, so we use
	 * our own wakeup source to ensure wakeup events are properly handled
	 * without trampling on the transport layer's wakeup source.
	 */
	device_set_wakeup_capable(&vb->vdev->dev, true);

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_unregister_oom:
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		unregister_oom_notifier(&vb->oom_nb);
out_unregister_shrinker:
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		virtio_balloon_unregister_shrinker(vb);
out_del_balloon_wq:
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		destroy_workqueue(vb->balloon_wq);
out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}

static void remove_common(struct virtio_balloon *vb)
{
	/* There might be pages left in the balloon: free them. */
	while (vb->num_pages)
		leak_balloon(vb, vb->num_pages);
	update_balloon_size(vb);

	/* There might be free pages that are being reported: release them. */
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		return_free_pages_to_mm(vb, ULONG_MAX);

	/* Now we reset the device so we can clean up the queues. */
	virtio_reset_device(vb->vdev);

	vb->vdev->config->del_vqs(vb->vdev);
}

static void virtballoon_remove(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
		page_reporting_unregister(&vb->pr_dev_info);
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		unregister_oom_notifier(&vb->oom_nb);
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		virtio_balloon_unregister_shrinker(vb);
	spin_lock_irq(&vb->stop_update_lock);
	vb->stop_update = true;
	spin_unlock_irq(&vb->stop_update_lock);
	cancel_work_sync(&vb->update_balloon_size_work);
	cancel_work_sync(&vb->update_balloon_stats_work);

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		cancel_work_sync(&vb->report_free_page_work);
		destroy_workqueue(vb->balloon_wq);
	}

	remove_common(vb);
	kfree(vb);
}

1149 static int virtballoon_freeze(struct virtio_device
*vdev
)
1151 struct virtio_balloon
*vb
= vdev
->priv
;
1154 * The workqueue is already frozen by the PM core before this
1155 * function is called.
1161 static int virtballoon_restore(struct virtio_device
*vdev
)
1163 struct virtio_balloon
*vb
= vdev
->priv
;
1166 ret
= init_vqs(vdev
->priv
);
1170 virtio_device_ready(vdev
);
1172 if (towards_target(vb
))
1173 virtballoon_changed(vdev
);
1174 update_balloon_size(vb
);
static int virtballoon_validate(struct virtio_device *vdev)
{
	/*
	 * Inform the hypervisor that our pages are poisoned or
	 * initialized. If we cannot do that then we should disable
	 * page reporting as it could potentially change the contents
	 * of our free pages.
	 */
	if (!want_init_on_free() && !page_poisoning_enabled_static())
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
	else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);

	__virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM);
	return 0;
}

static unsigned int features[] = {
	VIRTIO_BALLOON_F_MUST_TELL_HOST,
	VIRTIO_BALLOON_F_STATS_VQ,
	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
	VIRTIO_BALLOON_F_FREE_PAGE_HINT,
	VIRTIO_BALLOON_F_PAGE_POISON,
	VIRTIO_BALLOON_F_REPORTING,
};

static struct virtio_driver virtio_balloon_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.id_table =	id_table,
	.validate =	virtballoon_validate,
	.probe =	virtballoon_probe,
	.remove =	virtballoon_remove,
	.config_changed = virtballoon_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze	=	virtballoon_freeze,
	.restore =	virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");