/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}
static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return lru->memcg_aware;
}
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        /*
         * The lock protects the array of per-cgroup lists from relocation
         * (see memcg_update_list_lru_node).
         */
        lockdep_assert_held(&nlru->lock);
        if (nlru->memcg_lrus && idx >= 0)
                return nlru->memcg_lrus->lru[idx];

        return &nlru->lru;
}
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
        struct page *page;

        if (!memcg_kmem_enabled())
                return NULL;
        page = virt_to_head_page(ptr);
        return page->mem_cgroup;
}
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        struct mem_cgroup *memcg;

        if (!nlru->memcg_lrus)
                return &nlru->lru;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                return &nlru->lru;

        return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
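/*
 * Summary of the lookup chain implemented by the helpers above, for a
 * memcg-aware lru (illustrative only):
 *
 *      object -> virt_to_head_page() -> page->mem_cgroup
 *             -> memcg_cache_id()    -> nlru->memcg_lrus->lru[idx]
 *
 * If any step is unavailable (kmem accounting disabled, no owning memcg,
 * or a non-memcg-aware lru), the object falls back to the node-local
 * &nlru->lru list.
 */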
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_add_tail(item, &l->list);
                l->nr_items++;
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
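/*
 * Typical caller-side usage sketch (illustrative only; "struct foo" and
 * foo_lru are hypothetical, not part of this file):
 *
 *      struct foo {
 *              struct list_head lru;   // must start out empty
 *              ...
 *      };
 *
 *      // last reference dropped: park the object on the LRU
 *      list_lru_add(&foo_lru, &foo->lru);
 *
 *      // object reused: pull it back off the LRU
 *      list_lru_del(&foo_lru, &foo->lru);
 *
 * Both helpers return false when the item was already in (resp. not in) a
 * list, so callers can tolerate benign races on the embedded list_head.
 */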
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
static unsigned long __list_lru_count_one(struct list_lru *lru,
                                          int nid, int memcg_idx)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
        count = l->nr_items;
        spin_unlock(&nlru->lock);

        return count;
}
unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
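/*
 * Note that list_lru_count_node() reports the per-node total without taking
 * nlru->lock: the count is only used as a reclaim heuristic, so a slightly
 * stale value is acceptable, whereas __list_lru_count_one() above must lock
 * because it dereferences the per-memcg array.
 */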
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
                                   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
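/*
 * Sketch of an isolate callback suitable for the walk functions above
 * (illustrative only; foo_can_reclaim() and the dispose list are
 * hypothetical caller policy):
 *
 *      static enum lru_status foo_isolate(struct list_head *item,
 *                      struct list_lru_one *lru, spinlock_t *lock, void *arg)
 *      {
 *              struct list_head *dispose = arg;
 *
 *              if (!foo_can_reclaim(item))
 *                      return LRU_ROTATE;
 *
 *              list_lru_isolate_move(lru, item, dispose);
 *              return LRU_REMOVED;
 *      }
 *
 * A callback that drops *lock must return LRU_REMOVED_RETRY or LRU_RETRY so
 * that the walker restarts its (now invalid) traversal.
 */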
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
                                        nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        isolated += __list_lru_walk_one(lru, nid, memcg_idx,
                                                isolate, cb_arg, nr_to_walk);
                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
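/*
 * list_lru_walk_node() scans the node-local (non-memcg) list first and then,
 * if the lru is memcg aware and the walk budget is not yet spent, each
 * per-memcg list in cache-id order.  *nr_to_walk is decremented as items are
 * visited, so a caller can spread a single budget across several nodes.
 */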
static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}
static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
}
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        int size = memcg_nr_cache_ids;

        nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
        if (!nlru->memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
                kfree(nlru->memcg_lrus);
                return -ENOMEM;
        }

        return 0;
}
static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
        kfree(nlru->memcg_lrus);
}
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = nlru->memcg_lrus;
        new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kfree(new);
                return -ENOMEM;
        }

        memcpy(new, old, old_size * sizeof(void *));

        /*
         * The lock guarantees that we won't race with a reader
         * (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        nlru->memcg_lrus = new;
        spin_unlock_irq(&nlru->lock);

        kfree(old);
        return 0;
}
static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        /*
         * Do not bother shrinking the array back to the old size, because we
         * cannot handle allocation failures here.
         */
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        lru->memcg_aware = memcg_aware;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}
static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;

                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}
static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}
int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
                                      int src_idx, int dst_idx)
{
        struct list_lru_one *src, *dst;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        dst->nr_items += src->nr_items;
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}
static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, int dst_idx)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}
void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_idx);
        mutex_unlock(&list_lrus_mutex);
}
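/*
 * memcg_drain_all_list_lrus() is used by the memcg code (typically when a
 * cgroup's kmem caches are torn down) to splice anything still sitting on
 * the source cache-id's per-memcg lists over to the destination index, so
 * the objects stay reachable by the shrinkers.
 */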
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;
        int err = -ENOMEM;

        memcg_get_cache_ids();

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                /* Do this so a list_lru_destroy() doesn't crash: */
                lru->node = NULL;
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
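/*
 * Callers normally go through the list_lru_init*() wrappers in
 * <linux/list_lru.h>, which expand to __list_lru_init() with the desired
 * memcg_aware/key arguments.  A minimal usage sketch (illustrative only;
 * foo_lru is hypothetical):
 *
 *      static struct list_lru foo_lru;
 *
 *      err = list_lru_init_memcg(&foo_lru);
 *      if (err)
 *              return err;
 *      ...
 *      list_lru_destroy(&foo_lru);
 */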
void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();

        list_lru_unregister(lru);

        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);