// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return lru->shrinker_id;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to be always present, even
	 * in the systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}

static int lru_shrinker_id(struct list_lru *lru)
{
	return -1;
}

static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

static unsigned long
__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock(&nlru->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long
list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		      list_lru_walk_cb isolate, void *cb_arg,
		      unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	unsigned long ret;

	spin_lock_irq(&nlru->lock);
	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
				  nr_to_walk);
	spin_unlock_irq(&nlru->lock);
	return ret;
}

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
				      nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			struct list_lru_node *nlru = &lru->node[nid];

			spin_lock(&nlru->lock);
			isolated += __list_lru_walk_one(nlru, memcg_idx,
							isolate, cb_arg,
							nr_to_walk);
			spin_unlock(&nlru->lock);

			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/* free only the entries that were successfully allocated */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * This is called when shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}

static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, struct mem_cgroup *dst_memcg)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}

void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_memcg);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */

int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);