/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>

bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		if (nlru->nr_items++ == 0)
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
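
/*
 * A minimal usage sketch for the insertion path above. The object type and
 * helper below are illustrative, not part of this file; they assume a
 * slab-allocated object that embeds its LRU linkage:
 *
 *	struct demo_obj {
 *		struct list_head lru;
 *	};
 *
 *	static void demo_obj_make_reclaimable(struct list_lru *lru,
 *					      struct demo_obj *obj)
 *	{
 *		// returns false if the object was already on the list
 *		list_lru_add(lru, &obj->lru);
 *	}
 *
 * The item must live in page-backed memory (e.g. a slab allocation), since
 * the node id is derived from virt_to_page(item).
 */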

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		if (--nlru->nr_items == 0)
			node_clear(nid, lru->active_nodes);
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
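
/*
 * The deletion side is normally called from the object's teardown path; a
 * sketch reusing the illustrative demo_obj above:
 *
 *	static void demo_obj_free(struct list_lru *lru, struct demo_obj *obj)
 *	{
 *		list_lru_del(lru, &obj->lru);
 *		kfree(obj);
 *	}
 *
 * The list_empty() check makes list_lru_del() safe to call whether or not
 * the object is currently on the list; it returns false in the latter case.
 */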

unsigned long
list_lru_count_node(struct list_lru *lru, int nid)
{
	unsigned long count = 0;
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	count += nlru->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
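
/*
 * A sketch of how a caller might total the per-node counts, consulting
 * active_nodes so that empty nodes are skipped; demo_count_all() is an
 * illustrative helper, not an API of this file:
 *
 *	static unsigned long demo_count_all(struct list_lru *lru)
 *	{
 *		unsigned long count = 0;
 *		int nid;
 *
 *		for_each_node_mask(nid, lru->active_nodes)
 *			count += list_lru_count_node(lru, nid);
 *		return count;
 *	}
 */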

unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
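
/*
 * A minimal isolate callback of the shape list_lru_walk_node() expects,
 * again using the illustrative demo_obj; it runs under nlru->lock, must
 * not sleep, and must itself unlink any item it reports as removed (the
 * walker only fixes up nr_items and active_nodes):
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *					    spinlock_t *lock, void *cb_arg)
 *	{
 *		struct demo_obj *obj =
 *			container_of(item, struct demo_obj, lru);
 *
 *		if (demo_obj_in_use(obj))	// illustrative predicate
 *			return LRU_ROTATE;	// keep it, move to tail
 *
 *		list_del_init(item);
 *		return LRU_REMOVED;
 *	}
 */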

int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		return -ENOMEM;

	nodes_clear(lru->active_nodes);
	for (i = 0; i < nr_node_ids; i++) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init_key);

void list_lru_destroy(struct list_lru *lru)
{
	kfree(lru->node);
}
EXPORT_SYMBOL_GPL(list_lru_destroy);