mm/workingset.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Workingset detection
 *
 * Copyright (C) 2013 Red Hat, Inc., Johannes Weiner
 */

#include <linux/memcontrol.h>
#include <linux/writeback.h>
#include <linux/shmem_fs.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
/*
 *		Double CLOCK lists
 *
 * Per node, two clock lists are maintained for file pages: the
 * inactive and the active list. Freshly faulted pages start out at
 * the head of the inactive list and page reclaim scans pages from the
 * tail. Pages that are accessed multiple times on the inactive list
 * are promoted to the active list, to protect them from reclaim,
 * whereas active pages are demoted to the inactive list when the
 * active list grows too big.
 *
 *   fault ------------------------+
 *                                 |
 *              +--------------+   |            +-------------+
 *   reclaim <- |   inactive   | <-+-- demotion |    active   | <--+
 *              +--------------+                +-------------+    |
 *                     |                                           |
 *                     +-------------- promotion ------------------+
 *
 *		Access frequency and refault distance
 *
 * A workload is thrashing when its pages are frequently used but they
 * are evicted from the inactive list every time before another access
 * would have promoted them to the active list.
 *
 * In cases where the average access distance between thrashing pages
 * is bigger than the size of memory there is nothing that can be
 * done - the thrashing set could never fit into memory under any
 * circumstance.
 *
 * However, the average access distance could be bigger than the
 * inactive list, yet smaller than the size of memory. In this case,
 * the set could fit into memory if it weren't for the currently
 * active pages - which may be used more, hopefully less, frequently:
 *
 *      +-memory available to cache-+
 *      |                           |
 *      +-inactive------+-active----+
 *  a b | c d e f g h i | J K L M N |
 *      +---------------+-----------+
 *
 * It is prohibitively expensive to accurately track access frequency
 * of pages. But a reasonable approximation can be made to measure
 * thrashing on the inactive list, after which refaulting pages can be
 * activated optimistically to compete with the existing active pages.
 *
 * Approximating inactive page access frequency - Observations:
 *
 * 1. When a page is accessed for the first time, it is added to the
 *    head of the inactive list, slides every existing inactive page
 *    towards the tail by one slot, and pushes the current tail page
 *    out of memory.
 *
 * 2. When a page is accessed for the second time, it is promoted to
 *    the active list, shrinking the inactive list by one slot. This
 *    also slides all inactive pages that were faulted into the cache
 *    more recently than the activated page towards the tail of the
 *    inactive list.
 *
 * Thus:
 *
 * 1. The sum of evictions and activations between any two points in
 *    time indicates the minimum number of inactive pages accessed in
 *    between.
 *
 * 2. Moving one inactive page N page slots towards the tail of the
 *    list requires at least N inactive page accesses.
 *
 * Combining these:
 *
 * 1. When a page is finally evicted from memory, the number of
 *    inactive pages accessed while the page was in cache is at least
 *    the number of page slots on the inactive list.
 *
 * 2. In addition, measuring the sum of evictions and activations (E)
 *    at the time of a page's eviction, and comparing it to another
 *    reading (R) at the time the page faults back into memory tells
 *    the minimum number of accesses while the page was not cached.
 *    This is called the refault distance.
 *
 * Because the first access of the page was the fault and the second
 * access the refault, we combine the in-cache distance with the
 * out-of-cache distance to get the complete minimum access distance
 * of this page:
 *
 *		NR_inactive + (R - E)
 *
 * And knowing the minimum access distance of a page, we can easily
 * tell if the page would be able to stay in cache assuming all page
 * slots in the cache were available:
 *
 *		NR_inactive + (R - E) <= NR_inactive + NR_active
 *
 * which can be further simplified to
 *
 *		(R - E) <= NR_active
 *
 * Put into words, the refault distance (out-of-cache) can be seen as
 * a deficit in inactive list space (in-cache). If the inactive list
 * had (R - E) more page slots, the page would not have been evicted
 * in between accesses, but activated instead. And on a full system,
 * the only thing eating into inactive list space is active pages.
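 *
 * As an illustrative, made-up example: suppose a page is evicted when
 * the eviction/activation counter reads E = 1,000 and refaults when a
 * later reading gives R = 1,300, on a node whose active file list
 * holds NR_active = 500 pages. The refault distance R - E = 300 is
 * not bigger than NR_active, so the page could have stayed resident
 * had it been given 300 of the active list's slots, and it is
 * activated on refault. With R = 1,700 instead, the distance of 700
 * would exceed NR_active and the page would start out on the
 * inactive list again.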
 *
 *
 *		Refaulting inactive pages
 *
 * All that is known about the active list is that the pages have been
 * accessed more than once in the past. This means that at any given
 * time there is actually a good chance that pages on the active list
 * are no longer in active use.
 *
 * So when a refault distance of (R - E) is observed and there are at
 * least (R - E) active pages, the refaulting page is activated
 * optimistically in the hope that (R - E) active pages are actually
 * used less frequently than the refaulting page - or even not used at
 * all anymore.
 *
 * That means if inactive cache is refaulting with a suitable refault
 * distance, we assume the cache workingset is transitioning and put
 * pressure on the current active list.
 *
 * If this is wrong and demotion kicks in, the pages which are truly
 * used more frequently will be reactivated while the less frequently
 * used ones will be evicted from memory.
 *
 * But if this is right, the stale pages will be pushed out of memory
 * and the used pages get to stay in cache.
 *
 *
 *		Refaulting active pages
 *
 * If on the other hand the refaulting pages have recently been
 * deactivated, it means that the active list is no longer protecting
 * actively used cache from reclaim. The cache is NOT transitioning to
 * a different workingset; the existing workingset is thrashing in the
 * space allocated to the page cache.
 *
 *
 *		Implementation
 *
 * For each node's file LRU lists, a counter for inactive evictions
 * and activations is maintained (lruvec->inactive_age).
 *
 * On eviction, a snapshot of this counter (along with some bits to
 * identify the memcg and node) is stored in the now empty page cache
 * slot of the evicted page. This is called a shadow entry.
 *
 * On cache misses for which there are shadow entries, an eligible
 * refault distance will immediately activate the refaulting page.
 */

#define EVICTION_SHIFT	((BITS_PER_LONG - BITS_PER_XA_VALUE) +	\
			 1 + NODES_SHIFT + MEM_CGROUP_ID_SHIFT)
#define EVICTION_MASK	(~0UL >> EVICTION_SHIFT)

/*
 * Eviction timestamps need to be able to cover the full range of
 * actionable refaults. However, bits are tight in the xarray
 * entry, and after storing the identifier for the lruvec there might
 * not be enough left to represent every single actionable refault. In
 * that case, we have to sacrifice granularity for distance, and group
 * evictions into coarser buckets by shaving off lower timestamp bits.
 */
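
/*
 * A worked example with assumed (illustrative) config values: on a
 * 32-bit kernel with CONFIG_MEMCG (MEM_CGROUP_ID_SHIFT of 16) and
 * NODES_SHIFT of 6, EVICTION_SHIFT is (32 - 31) + 1 + 6 + 16 = 24,
 * leaving 32 - 24 = 8 timestamp bits. With 1G of RAM (2^18 pages),
 * workingset_init() below computes max_order = 18 and bucket_order =
 * 18 - 8 = 10, i.e. evictions are grouped into buckets of 1024.
 */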
static unsigned int bucket_order __read_mostly;

static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
			 bool workingset)
{
	eviction >>= bucket_order;
	eviction &= EVICTION_MASK;
	eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
	eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
	eviction = (eviction << 1) | workingset;

	return xa_mk_value(eviction);
}

static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
			  unsigned long *evictionp, bool *workingsetp)
{
	unsigned long entry = xa_to_value(shadow);
	int memcgid, nid;
	bool workingset;

	workingset = entry & 1;
	entry >>= 1;
	nid = entry & ((1UL << NODES_SHIFT) - 1);
	entry >>= NODES_SHIFT;
	memcgid = entry & ((1UL << MEM_CGROUP_ID_SHIFT) - 1);
	entry >>= MEM_CGROUP_ID_SHIFT;

	*memcgidp = memcgid;
	*pgdat = NODE_DATA(nid);
	*evictionp = entry << bucket_order;
	*workingsetp = workingset;
}
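
/*
 * The resulting shadow entry layout, as assembled by pack_shadow()
 * and taken apart again by unpack_shadow(), from high to low bits:
 *
 *	bucketed eviction timestamp	(BITS_PER_LONG - EVICTION_SHIFT bits)
 *	memcg ID			(MEM_CGROUP_ID_SHIFT bits)
 *	node ID				(NODES_SHIFT bits)
 *	workingset flag			(1 bit)
 *
 * with xa_mk_value() tagging the word so it is recognized as a value
 * entry rather than a page pointer in the page cache tree.
 */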

/**
 * workingset_eviction - note the eviction of a page from memory
 * @mapping: address space the page was backing
 * @page: the page being evicted
 *
 * Returns a shadow entry to be stored in @mapping->i_pages in place
 * of the evicted @page so that a later refault can be detected.
 */
void *workingset_eviction(struct address_space *mapping, struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);
	struct mem_cgroup *memcg = page_memcg(page);
	int memcgid = mem_cgroup_id(memcg);
	unsigned long eviction;
	struct lruvec *lruvec;

	/* Page is fully exclusive and pins page->mem_cgroup */
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	lruvec = mem_cgroup_lruvec(pgdat, memcg);
	eviction = atomic_long_inc_return(&lruvec->inactive_age);
	return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
}

/**
 * workingset_refault - evaluate the refault of a previously evicted page
 * @page: the freshly allocated replacement page
 * @shadow: shadow entry of the evicted page
 *
 * Calculates and evaluates the refault distance of the previously
 * evicted page in the context of the node it was allocated in.
 */
void workingset_refault(struct page *page, void *shadow)
{
	unsigned long refault_distance;
	struct pglist_data *pgdat;
	unsigned long active_file;
	struct mem_cgroup *memcg;
	unsigned long eviction;
	struct lruvec *lruvec;
	unsigned long refault;
	bool workingset;
	int memcgid;

	unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);

	rcu_read_lock();
	/*
	 * Look up the memcg associated with the stored ID. It might
	 * have been deleted since the page's eviction.
	 *
	 * Note that in rare events the ID could have been recycled
	 * for a new cgroup that refaults a shared page. This is
	 * impossible to tell from the available data. However, this
	 * should be a rare and limited disturbance, and activations
	 * are always speculative anyway. Ultimately, it's the aging
	 * algorithm's job to shake out the minimum access frequency
	 * for the active cache.
	 *
	 * XXX: On !CONFIG_MEMCG, this will always return NULL; it
	 * would be better if the root_mem_cgroup existed in all
	 * configurations instead.
	 */
	memcg = mem_cgroup_from_id(memcgid);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_lruvec(pgdat, memcg);
	refault = atomic_long_read(&lruvec->inactive_age);
	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES);

	/*
	 * Calculate the refault distance
	 *
	 * The unsigned subtraction here gives an accurate distance
	 * across inactive_age overflows in most cases. There is a
	 * special case: usually, shadow entries have a short lifetime
	 * and are either refaulted or reclaimed along with the inode
	 * before they get too old. But it is not impossible for the
	 * inactive_age to lap a shadow entry in the field, which can
	 * then result in a false small refault distance, leading to a
	 * false activation should this old entry actually refault
	 * again. However, earlier kernels used to deactivate
	 * unconditionally with *every* reclaim invocation for the
	 * longest time, so the occasional inappropriate activation
	 * leading to pressure on the active list is not a problem.
	 */
	refault_distance = (refault - eviction) & EVICTION_MASK;

	inc_lruvec_state(lruvec, WORKINGSET_REFAULT);

	/*
	 * Compare the distance to the existing workingset size. We
	 * don't act on pages that couldn't stay resident even if all
	 * the memory was available to the page cache.
	 */
	if (refault_distance > active_file)
		goto out;

	SetPageActive(page);
	atomic_long_inc(&lruvec->inactive_age);
	inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);

	/* Page was active prior to eviction */
	if (workingset) {
		SetPageWorkingset(page);
		inc_lruvec_state(lruvec, WORKINGSET_RESTORE);
	}
out:
	rcu_read_unlock();
}

/**
 * workingset_activation - note a page activation
 * @page: page that is being activated
 */
void workingset_activation(struct page *page)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	/*
	 * Filter non-memcg pages here, e.g. unmap can call
	 * mark_page_accessed() on VDSO pages.
	 *
	 * XXX: See workingset_refault() - this should return
	 * root_mem_cgroup even for !CONFIG_MEMCG.
	 */
	memcg = page_memcg_rcu(page);
	if (!mem_cgroup_disabled() && !memcg)
		goto out;
	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
	atomic_long_inc(&lruvec->inactive_age);
out:
	rcu_read_unlock();
}

/*
 * Shadow entries reflect the share of the working set that does not
 * fit into memory, so their number depends on the access pattern of
 * the workload. In most cases, they will refault or get reclaimed
 * along with the inode, but a (malicious) workload that streams
 * through files with a total size several times that of available
 * memory, while preventing the inodes from being reclaimed, can
 * create excessive amounts of shadow nodes. To keep a lid on this,
 * track shadow nodes and reclaim them when they grow way past the
 * point where they would still be useful.
 */

static struct list_lru shadow_nodes;
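
/*
 * workingset_update_node() below is meant to run whenever the number
 * of entries in a page cache XArray node changes; it is registered as
 * an XArray update callback via xas_set_update() (see
 * shadow_lru_isolate() further down for one such call site) and keeps
 * shadow_nodes in sync with which nodes hold nothing but shadow
 * entries.
 */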
void workingset_update_node(struct xa_node *node)
{
	/*
	 * Track non-empty nodes that contain only shadow entries;
	 * unlink those that contain pages or are being freed.
	 *
	 * Avoid acquiring the list_lru lock when the nodes are
	 * already where they should be. The list_empty() test is safe
	 * as node->private_list is protected by the i_pages lock.
	 */
	VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */

	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list)) {
			list_lru_add(&shadow_nodes, &node->private_list);
			__inc_lruvec_page_state(virt_to_page(node),
						WORKINGSET_NODES);
		}
	} else {
		if (!list_empty(&node->private_list)) {
			list_lru_del(&shadow_nodes, &node->private_list);
			__dec_lruvec_page_state(virt_to_page(node),
						WORKINGSET_NODES);
		}
	}
}

static unsigned long count_shadow_nodes(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	unsigned long max_nodes;
	unsigned long nodes;
	unsigned long pages;

	nodes = list_lru_shrink_count(&shadow_nodes, sc);

	/*
	 * Approximate a reasonable limit for the nodes
	 * containing shadow entries. We don't need to keep more
	 * shadow entries than possible pages on the active list,
	 * since refault distances bigger than that are dismissed.
	 *
	 * The size of the active list converges toward 100% of
	 * overall page cache as memory grows, with only a tiny
	 * inactive list. Assume the total cache size for that.
	 *
	 * Nodes might be sparsely populated, with only one shadow
	 * entry in the extreme case. Obviously, we cannot keep one
	 * node for every eligible shadow entry, so compromise on a
	 * worst-case density of 1/8th. Below that, not all eligible
	 * refaults can be detected anymore.
	 *
	 * On 64-bit with 7 xa_nodes per page and 64 slots
	 * each, this will reclaim shadow entries when they consume
	 * ~1.8% of available memory:
	 *
	 * PAGE_SIZE / xa_nodes / node_entries * 8 / PAGE_SIZE
	 */
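	/*
	 * To make the ~1.8% figure concrete (illustrative numbers for
	 * 4096-byte pages): an xa_node is about 4096 / 7 = 585 bytes
	 * and has 64 slots, and the limit below permits one node per
	 * 64 / 8 = 8 pages of memory, i.e. roughly 585 / (8 * 4096) =
	 * 1/56 of memory in shadow nodes before count_shadow_nodes()
	 * starts reporting the excess to the shrinker.
	 */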
#ifdef CONFIG_MEMCG
	if (sc->memcg) {
		struct lruvec *lruvec;

		pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
						     LRU_ALL);
		lruvec = mem_cgroup_lruvec(NODE_DATA(sc->nid), sc->memcg);
		pages += lruvec_page_state(lruvec, NR_SLAB_RECLAIMABLE);
		pages += lruvec_page_state(lruvec, NR_SLAB_UNRECLAIMABLE);
	} else
#endif
		pages = node_present_pages(sc->nid);

	max_nodes = pages >> (XA_CHUNK_SHIFT - 3);

	if (!nodes)
		return SHRINK_EMPTY;

	if (nodes <= max_nodes)
		return 0;
	return nodes - max_nodes;
}

static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg) __must_hold(lru_lock)
{
	struct xa_node *node = container_of(item, struct xa_node, private_list);
	XA_STATE(xas, node->array, 0);
	struct address_space *mapping;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the i_pages lock and the
	 * lru_lock. Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has nodes on the LRU.
	 *
	 * We can then safely transition to the i_pages lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	mapping = container_of(node->array, struct address_space, i_pages);

	/* Coming from the list, invert the lock order */
	if (!xa_trylock(&mapping->i_pages)) {
		spin_unlock_irq(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	__dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES);

	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	if (WARN_ON_ONCE(!node->nr_values))
		goto out_invalid;
	if (WARN_ON_ONCE(node->count != node->nr_values))
		goto out_invalid;
	mapping->nrexceptional -= node->nr_values;
	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
	xas.xa_offset = node->offset;
	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
	xas_set_update(&xas, workingset_update_node);
	/*
	 * We could store a shadow entry here which was the minimum of the
	 * shadow entries we were tracking ...
	 */
	xas_store(&xas, NULL);
	__inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);

out_invalid:
	xa_unlock_irq(&mapping->i_pages);
	ret = LRU_REMOVED_RETRY;
out:
	cond_resched();
	spin_lock_irq(lru_lock);
	return ret;
}

static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
				       struct shrink_control *sc)
{
	/* list_lru lock nests inside the IRQ-safe i_pages lock */
	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
					NULL);
}

static struct shrinker workingset_shadow_shrinker = {
	.count_objects = count_shadow_nodes,
	.scan_objects = scan_shadow_nodes,
	.seeks = 0, /* ->count reports only fully expendable nodes */
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};

/*
 * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
 * i_pages lock.
 */
static struct lock_class_key shadow_nodes_key;

static int __init workingset_init(void)
{
	unsigned int timestamp_bits;
	unsigned int max_order;
	int ret;

	BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
	/*
	 * Calculate the eviction bucket size to cover the longest
	 * actionable refault distance, which is currently half of
	 * memory (totalram_pages/2). However, memory hotplug may add
	 * some more pages at runtime, so keep working with up to
	 * double the initial memory by using totalram_pages as-is.
	 */
	timestamp_bits = BITS_PER_LONG - EVICTION_SHIFT;
	max_order = fls_long(totalram_pages() - 1);
	if (max_order > timestamp_bits)
		bucket_order = max_order - timestamp_bits;
	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
	       timestamp_bits, max_order, bucket_order);

	ret = prealloc_shrinker(&workingset_shadow_shrinker);
	if (ret)
		goto err;
	ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
			      &workingset_shadow_shrinker);
	if (ret)
		goto err_list_lru;
	register_shrinker_prepared(&workingset_shadow_shrinker);
	return 0;
err_list_lru:
	free_prealloced_shrinker(&workingset_shadow_shrinker);
err:
	return ret;
}
module_init(workingset_init);