/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;
/* list_lru_walk_cb has to always return one of those */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};
struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};
struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_one	*lru[0];
};
struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
#ifdef CONFIG_MEMCG_KMEM
	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
	struct list_lru_memcg	__rcu *memcg_lrus;
#endif
	long			nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG_KMEM
	struct list_head	list;
#endif
};
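
/*
 * A minimal usage sketch (hypothetical, not part of this API): a cache
 * embeds a struct list_lru and threads its objects through an embedded
 * struct list_head. The my_object/my_cache names below are assumed for
 * illustration only.
 *
 *	struct my_object {
 *		struct list_head lru_node;	// linked into my_cache.lru
 *		refcount_t ref;			// assumed reference count
 *	};
 *
 *	struct my_cache {
 *		struct list_lru lru;		// per-node (and per-memcg) lists
 *		struct shrinker shrinker;	// reclaims objects via the lru
 *	};
 */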
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)			\
	__list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, NULL, shrinker)
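
/*
 * Init/teardown sketch (hypothetical caller, names assumed): a cgroup
 * aware lru is set up with list_lru_init_memcg() and must always be
 * paired with list_lru_destroy().
 *
 *	int my_cache_init(struct my_cache *cache)
 *	{
 *		// cache->shrinker is assumed to be registered separately
 *		return list_lru_init_memcg(&cache->lru, &cache->shrinker);
 *	}
 *
 *	void my_cache_exit(struct my_cache *cache)
 *	{
 *		list_lru_destroy(&cache->lru);
 *	}
 */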
int memcg_update_all_list_lrus(int num_memcgs);
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
/**
 * list_lru_add: add an element to the lru list's tail
 * @list_lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns doing
 * nothing. Therefore the caller does not need to keep state about whether or
 * not the element already belongs in the list and is allowed to lazily
 * update it. Note however that this is valid for *a* list, not *this* list.
 * If the caller organizes itself in a way that elements can be in more than
 * one type of list, it is up to the caller to fully remove the item from
 * the previous list (with list_lru_del() for instance) before moving it
 * to @list_lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
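
/*
 * Usage sketch (hypothetical): since list_lru_add() is a no-op for an
 * item already on a list, a cache may call it unconditionally whenever
 * an object becomes unused.
 *
 *	static void my_object_put(struct my_cache *cache, struct my_object *obj)
 *	{
 *		if (refcount_dec_and_test(&obj->ref))
 *			list_lru_add(&cache->lru, &obj->lru_node);
 *	}
 */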
/**
 * list_lru_del: delete an element from the lru list
 * @list_lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del().
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);
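
/*
 * The converse sketch (hypothetical, mirroring my_object_put() above):
 * an object being looked up again is taken off the lru before reuse.
 *
 *	static void my_object_reuse(struct my_cache *cache, struct my_object *obj)
 *	{
 *		list_lru_del(&cache->lru, &obj->lru_node);
 *		refcount_set(&obj->ref, 1);
 *	}
 */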
/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);
static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}
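
/*
 * Shrinker count callback sketch (hypothetical my_cache wiring):
 * list_lru_shrink_count() already honours the node and memcg carried in
 * the shrink_control, so the callback reduces to a single call. The
 * shrinker is assumed to be embedded in my_cache.
 *
 *	static unsigned long my_cache_count(struct shrinker *shrink,
 *					    struct shrink_control *sc)
 *	{
 *		struct my_cache *cache = container_of(shrink,
 *						      struct my_cache, shrinker);
 *
 *		return list_lru_shrink_count(&cache->lru, sc);
 *	}
 */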
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}
void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
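
/*
 * Isolate callback sketch (hypothetical): freeable items are detached
 * with list_lru_isolate_move() onto a caller-owned dispose list passed
 * via @cb_arg; items still in use are rotated for another pass.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct my_object *obj = container_of(item, struct my_object,
 *						     lru_node);
 *
 *		if (refcount_read(&obj->ref))	// still referenced, keep it
 *			return LRU_ROTATE;
 *
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */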
/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
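
/*
 * Direct walk sketch (hypothetical, reusing my_isolate() from above):
 * scan at most 128 items belonging to @memcg on node @nid, then free
 * whatever was isolated. my_dispose_list() is an assumed helper that
 * frees everything on the list.
 *
 *	static void my_cache_trim(struct my_cache *cache, int nid,
 *				  struct mem_cgroup *memcg)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long nr_to_walk = 128;
 *
 *		list_lru_walk_one(&cache->lru, nid, memcg, my_isolate,
 *				  &dispose, &nr_to_walk);
 *		my_dispose_list(&dispose);
 *	}
 */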
/**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
				    int nid, struct mem_cgroup *memcg,
				    list_lru_walk_cb isolate, void *cb_arg,
				    unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);
static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}
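
/*
 * Shrinker scan callback sketch (hypothetical, the counterpart of
 * my_cache_count() above): the shrink_control supplies the node, memcg
 * and scan budget (sc->nr_to_scan); my_dispose_list() is again an
 * assumed helper.
 *
 *	static unsigned long my_cache_scan(struct shrinker *shrink,
 *					   struct shrink_control *sc)
 *	{
 *		struct my_cache *cache = container_of(shrink,
 *						      struct my_cache, shrinker);
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&cache->lru, sc,
 *					     my_isolate, &dispose);
 *		my_dispose_list(&dispose);
 *		return freed;
 *	}
 */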
static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
			 list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
				     &sc->nr_to_scan);
}
static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}
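
/*
 * Teardown sketch (hypothetical): because list_lru_walk() visits every
 * node, it can drain all remaining items before list_lru_destroy().
 * Batching keeps lock hold times bounded; my_isolate() and
 * my_dispose_list() are the assumed helpers from above.
 *
 *	static void my_cache_drain(struct my_cache *cache)
 *	{
 *		while (list_lru_count(&cache->lru)) {
 *			LIST_HEAD(dispose);
 *
 *			list_lru_walk(&cache->lru, my_isolate, &dispose, 1024);
 *			my_dispose_list(&dispose);
 *		}
 *	}
 */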
#endif /* _LRU_LIST_H */