// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>

/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *      NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *   NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *   (R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
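 *
 * A purely illustrative example (the numbers are not from this file):
 * if the eviction/activation counter read E = 1000 when a page was
 * evicted and R = 1300 when it refaults, the refault distance is 300.
 * With 500 pages on the active list the page could have stayed
 * resident (300 <= 500), so it is activated; with only 200 active
 * pages it could not have stayed resident even with all of the cache
 * available, and it starts over on the inactive list.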
 *
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's LRU lists, a counter for inactive evictions and
 * activations is maintained (node->nonresident_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)
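
/*
 * Worked example of the bit budget (illustrative only; the actual
 * values depend on the kernel configuration): on a 64-bit build
 * BITS_PER_XA_VALUE is BITS_PER_LONG - 1, so with NODES_SHIFT = 6 and
 * MEM_CGROUP_ID_SHIFT = 16 the shift comes to
 * (64 - 63) + 1 + 6 + 16 = 24, leaving 64 - 24 = 40 bits of eviction
 * timestamp before any bucketing is needed.
 */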

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}
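
/*
 * Sketch of the packed value handed to xa_mk_value(), highest to
 * lowest bits (widths depend on the configuration; this is only a
 * reading of pack_shadow()/unpack_shadow(), not a separate format
 * definition):
 *
 *   [ bucketed eviction timestamp ][ memcg ID ][ node ID ][ workingset bit ]
 *
 * The memcg ID takes MEM_CGROUP_ID_SHIFT bits, the node ID takes
 * NODES_SHIFT bits, the workingset flag one bit, and the timestamp
 * whatever EVICTION_SHIFT leaves over. unpack_shadow() peels the
 * fields off again in the reverse order.
 */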

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}

/**
 * workingset_age_nonresident - age non-resident entries as LRU ages
 * @lruvec: the lruvec that was aged
 * @nr_pages: the number of pages to count
 *
 * As in-memory pages are aged, non-resident pages need to be aged as
 * well, in order for the refault distances later on to be comparable
 * to the in-memory dimensions. This function allows reclaim and LRU
 * operations to drive the non-resident aging along in parallel.
 */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
{
	/*
	 * Reclaiming a cgroup means reclaiming all its children in a
	 * round-robin fashion. That means that each cgroup has an LRU
	 * order that is composed of the LRU orders of its child
	 * cgroups; and every page has an LRU position not just in the
	 * cgroup that owns it, but in all of that group's ancestors.
	 *
	 * So when the physical inactive list of a leaf cgroup ages,
	 * the virtual inactive lists of all its parents, including
	 * the root cgroup's, age as well.
	 */
	do {
		atomic_long_add(nr_pages, &lruvec->nonresident_age);
	} while ((lruvec = parent_lruvec(lruvec)));
}
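
/*
 * Concrete instance (nothing beyond what the loop above already does):
 * evicting four pages from a leaf cgroup adds 4 to that cgroup's
 * nonresident_age and to the counter of every ancestor up to the root,
 * so refault distances computed later remain meaningful at each level
 * of the hierarchy.
 */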

/**
 * workingset_eviction - note the eviction of a page from memory
 * @target_memcg: the cgroup that is causing the reclaim
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @page->mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
{
	struct pglist_data *pgdat = page_pgdat(page);
	unsigned long eviction;
	struct lruvec *lruvec;
	int memcgid;

	/* Page is fully exclusive and pins page's memory cgroup pointer */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	/* XXX: target_memcg can be NULL, go through lruvec */
	memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
	eviction = atomic_long_read(&lruvec->nonresident_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}
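
/*
 * Rough lifecycle of a shadow entry (a sketch of the callers, which
 * live outside this file): page reclaim calls workingset_eviction()
 * and stores the returned value in the page's old slot in the
 * mapping's i_pages tree; when the page is later faulted back in, the
 * page cache insertion finds that value instead of a page and hands
 * it to workingset_refault() below, which decides whether the new
 * page starts out active.
 */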

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node and the memcg whose memory
 * pressure caused the eviction.
 */
void workingset_refault(struct page *page, void *shadow)
{
	bool file = page_is_file_lru(page);
	struct mem_cgroup *eviction_memcg;
	struct lruvec *eviction_lruvec;
	unsigned long refault_distance;
	unsigned long workingset_size;
	struct pglist_data *pgdat;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	eviction_memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !eviction_memcg)
		goto out;

	eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
	refault = atomic_long_read(&eviction_lruvec->nonresident_age);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across nonresident_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * nonresident_age to lap a shadow entry in the field, which
	 * can then result in a false small refault distance, leading
	 * to a false activation should this old entry actually
	 * refault again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	/*
	 * The activation decision for this page is made at the level
	 * where the eviction occurred, as that is where the LRU order
	 * during page reclaim is being determined.
	 *
	 * However, the cgroup that will own the page is the one that
	 * is actually experiencing the refault event.
	 */
	memcg = page_memcg(page);
	lruvec = mem_cgroup_lruvec(memcg, pgdat);

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't activate pages that couldn't stay resident even if
	 * all the memory was available to the workingset. Whether
	 * workingset competition needs to consider anon or not depends
	 * on having swap.
	 */
	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
	if (!file) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_INACTIVE_FILE);
	}
	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
		workingset_size += lruvec_page_state(eviction_lruvec,
						     NR_ACTIVE_ANON);
		if (file) {
			workingset_size += lruvec_page_state(eviction_lruvec,
							     NR_INACTIVE_ANON);
		}
	}
	if (refault_distance > workingset_size)
		goto out;

	SetPageActive(page);
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + file);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		/* XXX: Move to lru_cache_add() when it supports new vs putback */
		lru_note_cost_page(page);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
	workingset_age_nonresident(lruvec, thp_nr_pages(page));
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;

void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
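	/*
	 * Spelling the estimate out with illustrative 64-bit numbers:
	 * XA_CHUNK_SHIFT is 6 (64 slots), so max_nodes below is
	 * pages / 8; with roughly 7 xa_nodes per page, those nodes
	 * occupy about (pages / 8) * (PAGE_SIZE / 7) bytes, i.e.
	 * 1 / 56, or ~1.8%, of the pages' worth of memory - matching
	 * the figure quoted above.
	 */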
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;
		int i;

		lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
		for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)
			pages += lruvec_page_state_local(lruvec,
							 NR_LRU_BASE + i);
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT;
		pages += lruvec_page_state_local(
			lruvec, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT;
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xa_delete_node(node, workingset_update_node);
	__inc_lruvec_kmem_state(node, WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);
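
	/*
	 * Illustrative arithmetic (machine dependent): with
	 * EVICTION_SHIFT = 24 on 64-bit there are 40 timestamp bits,
	 * and a 16 GiB machine has 2^22 4K pages, so max_order = 22
	 * and bucket_order stays 0; only when the page count exceeded
	 * 2^40 would evictions start being grouped into buckets.
	 */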

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);