// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#define pr_fmt(fmt) "kasan: " fmt

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a single-linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }

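/*
 * A queue entry is the quarantine_link node embedded in each object's
 * struct kasan_free_meta (see qlink_to_object()), so queueing a freed
 * object needs no extra allocation. The helpers below implement the
 * minimal list operations the quarantine requires.
 */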
static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

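/* Append @qlink at the tail of @q and account @size bytes against it. */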
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

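/*
 * Splice all of @from onto the tail of @to and reinitialize @from;
 * the byte counts are carried over so to->bytes stays accurate.
 */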
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

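/*
 * Each CPU buffers up to 1 MB of freed objects before spilling them into
 * the global queue, which is organized as at least 1024 round-robin
 * batches (more on machines with many CPUs).
 */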
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

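/*
 * Per-CPU scratch lists used by kasan_quarantine_remove_cache() (via
 * __per_cpu_remove_cache()) to collect a dying cache's objects out of
 * the per-cpu queues before freeing them.
 */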
struct cpu_shrink_qlist {
	raw_spinlock_t lock;
	struct qlist_head qlist;
};

static DEFINE_PER_CPU(struct cpu_shrink_qlist, shrink_qlist) = {
	.lock = __RAW_SPIN_LOCK_UNLOCKED(shrink_qlist.lock),
};

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_slab(qlink)->slab_cache;
}

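/*
 * A quarantine link is the quarantine_link field inside the object's
 * kasan_free_meta; subtract free_meta_offset to get back to the start
 * of the object itself.
 */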
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	struct kasan_free_meta *free_meta = kasan_get_free_meta(cache, object);

	/*
	 * Note: Keep per-object metadata to allow KASAN print stack traces for
	 * use-after-free-before-realloc bugs.
	 */

	/*
	 * If init_on_free is enabled and KASAN's free metadata is stored in
	 * the object, zero the metadata. Otherwise, the object's memory will
	 * not be properly zeroed, as KASAN saves the metadata after the slab
	 * allocator zeroes the object.
	 */
	if (slab_want_init_on_free(cache) &&
	    cache->kasan_info.free_meta_offset == 0)
		memzero_explicit(free_meta, sizeof(*free_meta));

	___cache_free(cache, object, _THIS_IP_);
}

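/*
 * Return every object on @q to the slab allocator. If @cache is NULL, the
 * cache is looked up per object from its slab.
 */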
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

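/*
 * Put a just-freed object into quarantine instead of releasing it right
 * away. Returns true if the quarantine has taken ownership and the caller
 * must not free the object, false if quarantining is not possible (no free
 * metadata, or this CPU's queue is offline) and the caller should free it
 * itself. The callers are expected to be KASAN's slab free hooks, which
 * live outside this file.
 */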
bool kasan_quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * If there's no metadata for this object, don't put it into
	 * quarantine.
	 */
	if (!meta)
		return false;

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise kasan_quarantine_remove_cache() can
	 * miss some objects belonging to the cache if they are in our local
	 * temp list. kasan_quarantine_remove_cache() executes on_each_cpu()
	 * at the beginning which ensures that it either sees the objects in
	 * per-cpu lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return false;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

		raw_spin_lock(&quarantine_lock);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);

	return true;
}

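/*
 * Shrink the quarantine: once the total size of quarantined objects exceeds
 * quarantine_max_size, drop the oldest global batch and return its objects
 * to the allocator. The callers are expected to be KASAN's allocation hooks,
 * which live outside this file.
 */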
void kasan_quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that kasan_quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us private
	 * grace period domain that does not interfere with anything else,
	 * and (2) it allows synchronize_srcu() to return without waiting
	 * if there are no pending read critical sections (which is the
	 * expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
		2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

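/*
 * Move every object belonging to @cache from @from to @to; objects from
 * other caches are kept on @from, which is rebuilt in place.
 */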
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

static void __per_cpu_remove_cache(struct qlist_head *q, void *arg)
{
	struct kmem_cache *cache = arg;
	unsigned long flags;
	struct cpu_shrink_qlist *sq;

	sq = this_cpu_ptr(&shrink_qlist);
	raw_spin_lock_irqsave(&sq->lock, flags);
	qlist_move_cache(q, &sq->qlist, cache);
	raw_spin_unlock_irqrestore(&sq->lock, flags);
}

static void per_cpu_remove_cache(void *arg)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the writing to q->offline and
	 * per_cpu_remove_cache. Prevent cpu_quarantine from being corrupted
	 * by interrupt.
	 */
	if (READ_ONCE(q->offline))
		return;
	__per_cpu_remove_cache(q, arg);
}

/* Free all quarantined objects belonging to cache. */
void kasan_quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;
	int cpu;
	struct cpu_shrink_qlist *sq;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in kasan_quarantine_put(),
	 * nor objects being freed in kasan_quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	for_each_online_cpu(cpu) {
		sq = per_cpu_ptr(&shrink_qlist, cpu);
		raw_spin_lock_irqsave(&sq->lock, flags);
		qlist_move_cache(&sq->qlist, &to_free, cache);
		raw_spin_unlock_irqrestore(&sq->lock, flags);
	}
	qlist_free_all(&to_free, cache);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

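/*
 * CPU hotplug callbacks. A CPU's queue is marked offline before its
 * contents are freed, so kasan_quarantine_put() stops queueing onto it
 * while the CPU is going down.
 */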
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/* Ensure the ordering between the writing to q->offline and
	 * qlist_free_all. Otherwise, cpu_quarantine may be corrupted
	 * by interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);