/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_add(&lru->list, &list_lrus);
        mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
        mutex_lock(&list_lrus_mutex);
        list_del(&lru->list);
        mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        /*
         * This needs node 0 to be always present, even
         * in the systems supporting sparse numa ids.
         */
        return !!lru->node[0].memcg_lrus;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        /*
         * The lock protects the array of per cgroup lists from relocation
         * (see memcg_update_list_lru_node).
         */
        lockdep_assert_held(&nlru->lock);
        if (nlru->memcg_lrus && idx >= 0)
                return nlru->memcg_lrus->lru[idx];

        return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
        struct page *page;

        if (!memcg_kmem_enabled())
                return NULL;
        page = virt_to_head_page(ptr);
        return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        struct mem_cgroup *memcg;

        if (!nlru->memcg_lrus)
                return &nlru->lru;

        memcg = mem_cgroup_from_kmem(ptr);
        if (!memcg)
                return &nlru->lru;

        return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
        return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
        return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
        return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_add_tail(item, &l->list);
                l->nr_items++;
                nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
        int nid = page_to_nid(virt_to_page(item));
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;

        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                l = list_lru_from_kmem(nlru, item);
                list_del_init(item);
                l->nr_items--;
                nlru->nr_items--;
                spin_unlock(&nlru->lock);
                return true;
        }
        spin_unlock(&nlru->lock);
        return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
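
/*
 * Usage sketch (illustrative only, not part of this file): a cache that
 * embeds a list_head in each object can park unreferenced objects on a
 * list_lru with the two helpers above. The "struct foo", "obj" and
 * "foo_lru" names below are hypothetical.
 *
 *      struct foo {
 *              struct list_head lru;   // empty while the object is in use
 *              ...
 *      };
 *      static struct list_lru foo_lru;
 *
 *      // last reference dropped: returns true if the item was queued
 *      list_lru_add(&foo_lru, &obj->lru);
 *
 *      // object looked up and reused: returns true if it was still queued
 *      list_lru_del(&foo_lru, &obj->lru);
 *
 * Both helpers take the per-node lock themselves, so callers must not
 * already hold it.
 */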
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
        list_del_init(item);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head)
{
        list_move(item, head);
        list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
static unsigned long __list_lru_count_one(struct list_lru *lru,
                                          int nid, int memcg_idx)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        unsigned long count;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
        count = l->nr_items;
        spin_unlock(&nlru->lock);

        return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
                                 int nid, struct mem_cgroup *memcg)
{
        return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
        struct list_lru_node *nlru;

        nlru = &lru->node[nid];
        return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
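
/*
 * Example (sketch, assuming a shrinker built on top of this API): a
 * shrinker's ->count_objects() typically reports the size of its list_lru
 * for the node and memcg described by the shrink_control, via the
 * list_lru_shrink_count() wrapper from <linux/list_lru.h>, which forwards
 * sc->nid and sc->memcg to list_lru_count_one(). "foo_count" and "foo_lru"
 * are hypothetical names.
 *
 *      static unsigned long foo_count(struct shrinker *shrink,
 *                                     struct shrink_control *sc)
 *      {
 *              return list_lru_shrink_count(&foo_lru, sc);
 *      }
 */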
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
                    list_lru_walk_cb isolate, void *cb_arg,
                    unsigned long *nr_to_walk)
{
        struct list_lru_node *nlru = &lru->node[nid];
        struct list_lru_one *l;
        struct list_head *item, *n;
        unsigned long isolated = 0;

        spin_lock(&nlru->lock);
        l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
        list_for_each_safe(item, n, &l->list) {
                enum lru_status ret;

                /*
                 * decrement nr_to_walk first so that we don't livelock if we
                 * get stuck on large numbers of LRU_RETRY items
                 */
                if (!*nr_to_walk)
                        break;
                --*nr_to_walk;

                ret = isolate(item, l, &nlru->lock, cb_arg);
                switch (ret) {
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                        /* fall through */
                case LRU_REMOVED:
                        isolated++;
                        nlru->nr_items--;
                        /*
                         * If the lru lock has been dropped, our list
                         * traversal is now invalid and so we have to
                         * restart from scratch.
                         */
                        if (ret == LRU_REMOVED_RETRY)
                                goto restart;
                        break;
                case LRU_ROTATE:
                        list_move_tail(item, &l->list);
                        break;
                case LRU_SKIP:
                        break;
                case LRU_RETRY:
                        /*
                         * The lru lock has been dropped, our list traversal is
                         * now invalid and so we have to restart from scratch.
                         */
                        assert_spin_locked(&nlru->lock);
                        goto restart;
                default:
                        BUG();
                }
        }

        spin_unlock(&nlru->lock);
        return isolated;
}
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
                  list_lru_walk_cb isolate, void *cb_arg,
                  unsigned long *nr_to_walk)
{
        return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
                                   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
                                 list_lru_walk_cb isolate, void *cb_arg,
                                 unsigned long *nr_to_walk)
{
        long isolated = 0;
        int memcg_idx;

        isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
                                        nr_to_walk);
        if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
                for_each_memcg_cache_index(memcg_idx) {
                        isolated += __list_lru_walk_one(lru, nid, memcg_idx,
                                                isolate, cb_arg, nr_to_walk);
                        if (*nr_to_walk <= 0)
                                break;
                }
        }
        return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
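
/*
 * Example (sketch, not part of this file): a walk callback matching
 * list_lru_walk_cb decides the fate of each item and uses
 * list_lru_isolate()/list_lru_isolate_move() above when it takes an item
 * off the list. A shrinker's ->scan_objects() can then drive the walk via
 * the list_lru_shrink_walk() wrapper from <linux/list_lru.h>.
 * "struct foo", "foo_lru", "foo_is_busy" and "foo_free" are hypothetical.
 *
 *      static enum lru_status foo_isolate(struct list_head *item,
 *                                         struct list_lru_one *lru,
 *                                         spinlock_t *lru_lock, void *arg)
 *      {
 *              struct foo *obj = container_of(item, struct foo, lru);
 *
 *              if (foo_is_busy(obj))
 *                      return LRU_ROTATE;      // keep it, move to list tail
 *
 *              // move it to a private dispose list passed in via arg
 *              list_lru_isolate_move(lru, item, arg);
 *              return LRU_REMOVED;
 *      }
 *
 *      static unsigned long foo_scan(struct shrinker *shrink,
 *                                    struct shrink_control *sc)
 *      {
 *              LIST_HEAD(dispose);
 *              unsigned long freed;
 *
 *              freed = list_lru_shrink_walk(&foo_lru, sc, foo_isolate,
 *                                           &dispose);
 *              // free the isolated objects outside the lru lock
 *              while (!list_empty(&dispose)) {
 *                      struct foo *obj = list_first_entry(&dispose,
 *                                                         struct foo, lru);
 *                      list_del_init(&obj->lru);
 *                      foo_free(obj);
 *              }
 *              return freed;
 *      }
 */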
static void init_one_lru(struct list_lru_one *l)
{
        INIT_LIST_HEAD(&l->list);
        l->nr_items = 0;
}
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                          int begin, int end)
{
        int i;

        for (i = begin; i < end; i++)
                kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
                                      int begin, int end)
{
        int i;

        for (i = begin; i < end; i++) {
                struct list_lru_one *l;

                l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
                if (!l)
                        goto fail;

                init_one_lru(l);
                memcg_lrus->lru[i] = l;
        }
        return 0;
fail:
        /* free everything allocated so far: entries begin .. i - 1 */
        __memcg_destroy_list_lru_node(memcg_lrus, begin, i);
        return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
        int size = memcg_nr_cache_ids;

        nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
        if (!nlru->memcg_lrus)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
                kfree(nlru->memcg_lrus);
                return -ENOMEM;
        }

        return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
        kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                                      int old_size, int new_size)
{
        struct list_lru_memcg *old, *new;

        BUG_ON(old_size > new_size);

        old = nlru->memcg_lrus;
        new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        if (__memcg_init_list_lru_node(new, old_size, new_size)) {
                kfree(new);
                return -ENOMEM;
        }

        memcpy(new, old, old_size * sizeof(void *));

        /*
         * The lock guarantees that we won't race with a reader
         * (see list_lru_from_memcg_idx).
         *
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);
        nlru->memcg_lrus = new;
        spin_unlock_irq(&nlru->lock);

        kfree(old);
        return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
                                              int old_size, int new_size)
{
        /* do not bother shrinking the array back to the old size, because we
         * cannot handle allocation failures here */
        __memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        int i;

        if (!memcg_aware)
                return 0;

        for_each_node(i) {
                if (memcg_init_list_lru_node(&lru->node[i]))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;
                memcg_destroy_list_lru_node(&lru->node[i]);
        }
        return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
                                 int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return 0;

        for_each_node(i) {
                if (memcg_update_list_lru_node(&lru->node[i],
                                               old_size, new_size))
                        goto fail;
        }
        return 0;
fail:
        for (i = i - 1; i >= 0; i--) {
                if (!lru->node[i].memcg_lrus)
                        continue;

                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
        }
        return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
                                         int old_size, int new_size)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_cancel_update_list_lru_node(&lru->node[i],
                                                  old_size, new_size);
}
int memcg_update_all_list_lrus(int new_size)
{
        int ret = 0;
        struct list_lru *lru;
        int old_size = memcg_nr_cache_ids;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list) {
                ret = memcg_update_list_lru(lru, old_size, new_size);
                if (ret)
                        goto fail;
        }
out:
        mutex_unlock(&list_lrus_mutex);
        return ret;
fail:
        list_for_each_entry_continue_reverse(lru, &list_lrus, list)
                memcg_cancel_update_list_lru(lru, old_size, new_size);
        goto out;
}
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
                                      int src_idx, int dst_idx)
{
        struct list_lru_one *src, *dst;

        /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
        spin_lock_irq(&nlru->lock);

        src = list_lru_from_memcg_idx(nlru, src_idx);
        dst = list_lru_from_memcg_idx(nlru, dst_idx);

        list_splice_init(&src->list, &dst->list);
        dst->nr_items += src->nr_items;
        src->nr_items = 0;

        spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
                                 int src_idx, int dst_idx)
{
        int i;

        if (!list_lru_memcg_aware(lru))
                return;

        for_each_node(i)
                memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
        struct list_lru *lru;

        mutex_lock(&list_lrus_mutex);
        list_for_each_entry(lru, &list_lrus, list)
                memcg_drain_list_lru(lru, src_idx, dst_idx);
        mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
        return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
                    struct lock_class_key *key)
{
        int i;
        size_t size = sizeof(*lru->node) * nr_node_ids;
        int err = -ENOMEM;

        memcg_get_cache_ids();

        lru->node = kzalloc(size, GFP_KERNEL);
        if (!lru->node)
                goto out;

        for_each_node(i) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
                        lockdep_set_class(&lru->node[i].lock, key);
                init_one_lru(&lru->node[i].lru);
        }

        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
                /* Do this so a list_lru_destroy() doesn't crash: */
                lru->node = NULL;
                goto out;
        }

        list_lru_register(lru);
out:
        memcg_put_cache_ids();
        return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
void list_lru_destroy(struct list_lru *lru)
{
        /* Already destroyed or not yet initialized? */
        if (!lru->node)
                return;

        memcg_get_cache_ids();

        list_lru_unregister(lru);

        memcg_destroy_list_lru(lru);
        kfree(lru->node);
        lru->node = NULL;

        memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
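
/*
 * Lifecycle sketch (illustrative, names hypothetical): the list_lru_init()
 * and list_lru_init_memcg() macros in <linux/list_lru.h> wrap
 * __list_lru_init() above. A typical owner initialises the lru at
 * mount/module-init time, registers a shrinker that counts and walks it
 * (see the earlier sketches), and tears both down in reverse order.
 *
 *      err = list_lru_init_memcg(&foo_lru);    // memcg-aware variant
 *      if (err)
 *              return err;
 *      err = register_shrinker(&foo_shrinker);
 *      if (err) {
 *              list_lru_destroy(&foo_lru);
 *              return err;
 *      }
 *      ...
 *      unregister_shrinker(&foo_shrinker);
 *      list_lru_destroy(&foo_lru);
 */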