/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../slab.h"
#include "kasan.h"
/* Data structure and operations for quarantine queues. */
/*
 * Each queue is a singly linked list, which also stores the total size of
 * the objects inside it.
 */
struct qlist_head {
        struct qlist_node *head;
        struct qlist_node *tail;
        size_t bytes;
};
#define QLIST_INIT { NULL, NULL, 0 }
static bool qlist_empty(struct qlist_head *q)
{
        return !q->head;
}
static void qlist_init(struct qlist_head *q)
{
        q->head = q->tail = NULL;
        q->bytes = 0;
}
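
/* Append qlink to the tail of q, accounting its size in q->bytes. */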
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
                size_t size)
{
        if (unlikely(qlist_empty(q)))
                q->head = qlink;
        else
                q->tail->next = qlink;
        q->tail = qlink;
        qlink->next = NULL;
        q->bytes += size;
}
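
/* Splice all of *from onto the tail of *to and reinitialize *from. */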
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
        if (unlikely(qlist_empty(from)))
                return;

        if (qlist_empty(to)) {
                *to = *from;
                qlist_init(from);
                return;
        }

        to->tail->next = from->head;
        to->tail = from->tail;
        to->bytes += from->bytes;

        qlist_init(from);
}
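
/*
 * Sizing: each CPU caches up to 1 MB of quarantined objects before flushing
 * them to the global queue, which is split into at least 1024 batches (and
 * at least four per possible CPU) so it can be drained a batch at a time.
 */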
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
        (1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)
/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);
/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_SPINLOCK(quarantine_lock);
/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;
/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;
/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine doesn't support the memory shrinker with the SLAB allocator,
 * so we keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32
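
/* Look up the cache a quarantined object belongs to from its slab page. */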
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
        return virt_to_head_page(qlink)->slab_cache;
}
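
/*
 * A qlist_node lives inside the object's kasan_free_meta; walk back from it
 * to the start of the object using the cache's free_meta_offset.
 */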
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
        struct kasan_free_meta *free_info =
                container_of(qlink, struct kasan_free_meta,
                             quarantine_link);

        return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}
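
/*
 * Return the object to the allocator. With CONFIG_SLAB, ___cache_free()
 * expects to be called with interrupts disabled, hence the IRQ save/restore.
 */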
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
        void *object = qlink_to_object(qlink, cache);
        unsigned long flags;

        if (IS_ENABLED(CONFIG_SLAB))
                local_irq_save(flags);

        ___cache_free(cache, object, _THIS_IP_);

        if (IS_ENABLED(CONFIG_SLAB))
                local_irq_restore(flags);
}
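
/*
 * Free every object on q. If cache is NULL, each object's cache is looked
 * up from its slab page instead.
 */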
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
        struct qlist_node *qlink;

        if (unlikely(qlist_empty(q)))
                return;

        qlink = q->head;
        while (qlink) {
                struct kmem_cache *obj_cache =
                        cache ? cache : qlink_to_cache(qlink);
                struct qlist_node *next = qlink->next;

                qlink_free(qlink, obj_cache);
                qlink = next;
        }
        qlist_init(q);
}
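
/*
 * Queue a freed object in the per-cpu quarantine. When the per-cpu queue
 * exceeds QUARANTINE_PERCPU_SIZE, its contents are flushed into the current
 * global batch, and the batch cursor advances once the batch is full.
 */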
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
        unsigned long flags;
        struct qlist_head *q;
        struct qlist_head temp = QLIST_INIT;

        local_irq_save(flags);

        q = this_cpu_ptr(&cpu_quarantine);
        qlist_put(q, &info->quarantine_link, cache->size);
        if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
                qlist_move_all(q, &temp);

        local_irq_restore(flags);

        if (unlikely(!qlist_empty(&temp))) {
                spin_lock_irqsave(&quarantine_lock, flags);
                WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
                qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
                if (global_quarantine[quarantine_tail].bytes >=
                                READ_ONCE(quarantine_batch_size)) {
                        int new_tail;

                        new_tail = quarantine_tail + 1;
                        if (new_tail == QUARANTINE_BATCHES)
                                new_tail = 0;
                        if (new_tail != quarantine_head)
                                quarantine_tail = new_tail;
                }
                spin_unlock_irqrestore(&quarantine_lock, flags);
        }
}
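
/*
 * Shrink the global quarantine: recompute the size limits (the amount of
 * installed memory may have changed due to hotplug) and, if the quarantine
 * is still over its limit, free the oldest batch.
 */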
void quarantine_reduce(void)
{
        size_t total_size, new_quarantine_size, percpu_quarantines;
        unsigned long flags;
        struct qlist_head to_free = QLIST_INIT;

        if (likely(READ_ONCE(quarantine_size) <=
                   READ_ONCE(quarantine_max_size)))
                return;

        spin_lock_irqsave(&quarantine_lock, flags);

        /*
         * Update quarantine size in case of hotplug. Allocate a fraction of
         * the installed memory to quarantine minus per-cpu queue limits.
         */
        total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
                QUARANTINE_FRACTION;
        percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
        new_quarantine_size = (total_size < percpu_quarantines) ?
                0 : total_size - percpu_quarantines;
        WRITE_ONCE(quarantine_max_size, new_quarantine_size);
        /* Aim at consuming at most 1/2 of slots in quarantine. */
        WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
                2 * total_size / QUARANTINE_BATCHES));

        if (likely(quarantine_size > quarantine_max_size)) {
                qlist_move_all(&global_quarantine[quarantine_head], &to_free);
                WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
                quarantine_head++;
                if (quarantine_head == QUARANTINE_BATCHES)
                        quarantine_head = 0;
        }

        spin_unlock_irqrestore(&quarantine_lock, flags);

        qlist_free_all(&to_free, NULL);
}
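
/*
 * Move all objects belonging to cache from *from to *to; objects from other
 * caches are kept on *from.
 */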
static void qlist_move_cache(struct qlist_head *from,
                                   struct qlist_head *to,
                                   struct kmem_cache *cache)
{
        struct qlist_node *curr;

        if (unlikely(qlist_empty(from)))
                return;

        curr = from->head;
        qlist_init(from);
        while (curr) {
                struct qlist_node *next = curr->next;
                struct kmem_cache *obj_cache = qlink_to_cache(curr);

                if (obj_cache == cache)
                        qlist_put(to, curr, obj_cache->size);
                else
                        qlist_put(from, curr, obj_cache->size);

                curr = next;
        }
}
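
/* Free this CPU's quarantined objects that belong to cache. */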
static void per_cpu_remove_cache(void *arg)
{
        struct kmem_cache *cache = arg;
        struct qlist_head to_free = QLIST_INIT;
        struct qlist_head *q;

        q = this_cpu_ptr(&cpu_quarantine);
        qlist_move_cache(q, &to_free, cache);
        qlist_free_all(&to_free, cache);
}
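
/* Free all quarantined objects belonging to cache. */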
void quarantine_remove_cache(struct kmem_cache *cache)
{
        unsigned long flags, i;
        struct qlist_head to_free = QLIST_INIT;

        on_each_cpu(per_cpu_remove_cache, cache, 1);

        spin_lock_irqsave(&quarantine_lock, flags);
        for (i = 0; i < QUARANTINE_BATCHES; i++)
                qlist_move_cache(&global_quarantine[i], &to_free, cache);
        spin_unlock_irqrestore(&quarantine_lock, flags);

        qlist_free_all(&to_free, cache);
}