// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN runtime library.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/page.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmsan_types.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <linux/percpu-defs.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "../slab.h"
#include "kmsan.h"

bool kmsan_enabled __read_mostly;

/*
 * Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
 * unavailable.
 */
DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
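
/*
 * Zero out the KMSAN context of a new task and unpoison the current
 * thread_info, which is assumed to be fully initialized by this point.
 */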
void kmsan_internal_task_create(struct task_struct *task)
{
        struct kmsan_ctx *ctx = &task->kmsan_ctx;
        struct thread_info *info = current_thread_info();

        __memset(ctx, 0, sizeof(*ctx));
        kmsan_internal_unpoison_memory(info, sizeof(*info), false);
}
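
/*
 * Mark @size bytes at @address as uninitialized: fill the shadow with 0xff
 * and point the origin at the current stack trace (tagged with the UAF bit
 * if KMSAN_POISON_FREE is set). With KMSAN_POISON_CHECK, missing metadata
 * for the range is reported instead of being silently skipped.
 */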
void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
                                  unsigned int poison_flags)
{
        u32 extra_bits =
                kmsan_extra_bits(/*depth*/ 0, poison_flags & KMSAN_POISON_FREE);
        bool checked = poison_flags & KMSAN_POISON_CHECK;
        depot_stack_handle_t handle;

        handle = kmsan_save_stack_with_flags(flags, extra_bits);
        kmsan_internal_set_shadow_origin(address, size, -1, handle, checked);
}
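
/* Mark @size bytes at @address as initialized: zero both shadow and origin. */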
void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked)
{
        kmsan_internal_set_shadow_origin(address, size, 0, 0, checked);
}
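
/*
 * Save the current stack trace to the stack depot without sleeping and store
 * @extra in the spare bits of the returned handle.
 */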
depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
                                                 unsigned int extra)
{
        unsigned long entries[KMSAN_STACK_DEPTH];
        unsigned int nr_entries;
        depot_stack_handle_t handle;

        nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);

        /* Don't sleep. */
        flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM);

        handle = stack_depot_save(entries, nr_entries, flags);
        return stack_depot_set_extra_bits(handle, extra);
}

/* Copy the metadata following the memmove() behavior. */
void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n)
{
        depot_stack_handle_t prev_old_origin = 0, prev_new_origin = 0;
        int i, iter, step, src_off, dst_off, oiter_src, oiter_dst;
        depot_stack_handle_t old_origin = 0, new_origin = 0;
        depot_stack_handle_t *origin_src, *origin_dst;
        u8 *shadow_src, *shadow_dst;
        u32 *align_shadow_dst;
        bool backwards;

        shadow_dst = kmsan_get_metadata(dst, KMSAN_META_SHADOW);
        if (!shadow_dst)
                return;
        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(dst, n));
        align_shadow_dst =
                (u32 *)ALIGN_DOWN((u64)shadow_dst, KMSAN_ORIGIN_SIZE);

        shadow_src = kmsan_get_metadata(src, KMSAN_META_SHADOW);
        if (!shadow_src) {
                /* @src is untracked: mark @dst as initialized. */
                kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
                return;
        }
        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(src, n));

        origin_dst = kmsan_get_metadata(dst, KMSAN_META_ORIGIN);
        origin_src = kmsan_get_metadata(src, KMSAN_META_ORIGIN);
        KMSAN_WARN_ON(!origin_dst || !origin_src);

        backwards = dst > src;
        step = backwards ? -1 : 1;
        iter = backwards ? n - 1 : 0;
        src_off = (u64)src % KMSAN_ORIGIN_SIZE;
        dst_off = (u64)dst % KMSAN_ORIGIN_SIZE;

        /* Copy shadow bytes one by one, updating the origins if necessary. */
        for (i = 0; i < n; i++, iter += step) {
                oiter_src = (iter + src_off) / KMSAN_ORIGIN_SIZE;
                oiter_dst = (iter + dst_off) / KMSAN_ORIGIN_SIZE;
                if (!shadow_src[iter]) {
                        shadow_dst[iter] = 0;
                        if (!align_shadow_dst[oiter_dst])
                                origin_dst[oiter_dst] = 0;
                        continue;
                }
                shadow_dst[iter] = shadow_src[iter];
                old_origin = origin_src[oiter_src];
                if (old_origin == prev_old_origin)
                        new_origin = prev_new_origin;
                else {
                        /*
                         * kmsan_internal_chain_origin() may return
                         * NULL, but we don't want to lose the previous
                         * origin value.
                         */
                        new_origin = kmsan_internal_chain_origin(old_origin);
                        if (!new_origin)
                                new_origin = old_origin;
                }
                origin_dst[oiter_dst] = new_origin;
                prev_new_origin = new_origin;
                prev_old_origin = old_origin;
        }
}
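
/*
 * Chain the origin @id with the current stack trace: save a new depot record
 * {KMSAN_CHAIN_MAGIC_ORIGIN, current stack, @id}, so that a report can show
 * the whole sequence of stores that copied an uninitialized value around.
 * The chain depth and the UAF bit travel in the extra bits of the handle.
 */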
depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
{
        unsigned long entries[3];
        u32 extra_bits;
        int depth;
        bool uaf;
        depot_stack_handle_t handle;

        if (!id)
                return id;
        /*
         * Make sure we have enough spare bits in @id to hold the UAF bit and
         * the chain depth.
         */
        BUILD_BUG_ON(
                (1 << STACK_DEPOT_EXTRA_BITS) <= (KMSAN_MAX_ORIGIN_DEPTH << 1));

        extra_bits = stack_depot_get_extra_bits(id);
        depth = kmsan_depth_from_eb(extra_bits);
        uaf = kmsan_uaf_from_eb(extra_bits);

        /*
         * Stop chaining origins once the depth reaches KMSAN_MAX_ORIGIN_DEPTH.
         * This mostly happens when structures with uninitialized padding are
         * copied around many times. Origin chains for such structures are
         * usually periodic, and it does not make sense to fully store them.
         */
        if (depth == KMSAN_MAX_ORIGIN_DEPTH)
                return id;

        depth++;
        extra_bits = kmsan_extra_bits(depth, uaf);

        entries[0] = KMSAN_CHAIN_MAGIC_ORIGIN;
        entries[1] = kmsan_save_stack_with_flags(__GFP_HIGH, 0);
        entries[2] = id;
        /*
         * @entries is a local var in non-instrumented code, so KMSAN does not
         * know it is initialized. Explicitly unpoison it to avoid false
         * positives when stack_depot_save() passes it to instrumented code.
         */
        kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
        handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
        return stack_depot_set_extra_bits(handle, extra_bits);
}
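
/*
 * Set the shadow of @size bytes at @addr to @b and write @origin into every
 * KMSAN_ORIGIN_SIZE-aligned origin slot that overlaps the range.
 */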
void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
                                      u32 origin, bool checked)
{
        u64 address = (u64)addr;
        u32 *shadow_start, *origin_start;
        size_t pad = 0;

        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
        shadow_start = kmsan_get_metadata(addr, KMSAN_META_SHADOW);
        if (!shadow_start) {
                /*
                 * kmsan_metadata_is_contiguous() is true, so either all shadow
                 * and origin pages are NULL, or all are non-NULL.
                 */
                if (checked) {
                        pr_err("%s: not memsetting %ld bytes starting at %px, because the shadow is NULL\n",
                               __func__, size, addr);
                        KMSAN_WARN_ON(true);
                }
                return;
        }
        __memset(shadow_start, b, size);

        if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
                pad = address % KMSAN_ORIGIN_SIZE;
                address -= pad;
                size += pad;
        }
        size = ALIGN(size, KMSAN_ORIGIN_SIZE);
        origin_start =
                (u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);

        /*
         * If the new origin is non-zero, assume that the shadow byte is also
         * non-zero, and unconditionally overwrite the old origin slot.
         * If the new origin is zero, overwrite the old origin slot iff the
         * corresponding shadow slot is zero.
         */
        for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
                if (origin || !shadow_start[i])
                        origin_start[i] = origin;
        }
}
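
/*
 * Return the struct page backing a vmalloc or module address, or NULL if
 * there is no valid page behind it.
 */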
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
{
        struct page *page;

        if (!kmsan_internal_is_vmalloc_addr(vaddr) &&
            !kmsan_internal_is_module_addr(vaddr))
                return NULL;
        page = vmalloc_to_page(vaddr);
        if (pfn_valid(page_to_pfn(page)))
                return page;
        else
                return NULL;
}
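
/*
 * Walk the shadow of @size bytes at @addr page by page and report every
 * maximal run of uninitialized bytes that shares a single origin.
 */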
void kmsan_internal_check_memory(void *addr, size_t size,
                                 const void __user *user_addr, int reason)
{
        depot_stack_handle_t cur_origin = 0, new_origin = 0;
        unsigned long addr64 = (unsigned long)addr;
        depot_stack_handle_t *origin = NULL;
        unsigned char *shadow = NULL;
        int cur_off_start = -1;
        int chunk_size;
        size_t pos = 0;

        if (!size)
                return;
        KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
        while (pos < size) {
                chunk_size = min(size - pos,
                                 PAGE_SIZE - ((addr64 + pos) % PAGE_SIZE));
                shadow = kmsan_get_metadata((void *)(addr64 + pos),
                                            KMSAN_META_SHADOW);
                if (!shadow) {
                        /*
                         * This page is untracked. If there were uninitialized
                         * bytes before, report them.
                         */
                        if (cur_origin) {
                                kmsan_enter_runtime();
                                kmsan_report(cur_origin, addr, size,
                                             cur_off_start, pos - 1, user_addr,
                                             reason);
                                kmsan_leave_runtime();
                        }
                        cur_origin = 0;
                        cur_off_start = -1;
                        pos += chunk_size;
                        continue;
                }
                for (int i = 0; i < chunk_size; i++) {
                        if (!shadow[i]) {
                                /*
                                 * This byte is unpoisoned. If there were
                                 * poisoned bytes before, report them.
                                 */
                                if (cur_origin) {
                                        kmsan_enter_runtime();
                                        kmsan_report(cur_origin, addr, size,
                                                     cur_off_start, pos + i - 1,
                                                     user_addr, reason);
                                        kmsan_leave_runtime();
                                }
                                cur_origin = 0;
                                cur_off_start = -1;
                                continue;
                        }
                        origin = kmsan_get_metadata((void *)(addr64 + pos + i),
                                                    KMSAN_META_ORIGIN);
                        KMSAN_WARN_ON(!origin);
                        new_origin = *origin;

                        /*
                         * Encountered new origin - report the previous
                         * uninitialized range.
                         */
                        if (cur_origin != new_origin) {
                                if (cur_origin) {
                                        kmsan_enter_runtime();
                                        kmsan_report(cur_origin, addr, size,
                                                     cur_off_start, pos + i - 1,
                                                     user_addr, reason);
                                        kmsan_leave_runtime();
                                }
                                cur_origin = new_origin;
                                cur_off_start = pos + i;
                        }
                }
                pos += chunk_size;
        }
        KMSAN_WARN_ON(pos != size);
        if (cur_origin) {
                kmsan_enter_runtime();
                kmsan_report(cur_origin, addr, size, cur_off_start, pos - 1,
                             user_addr, reason);
                kmsan_leave_runtime();
        }
}
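
/*
 * Check that the shadow and origin pages covering @size bytes at @addr follow
 * one another contiguously, so that the metadata for the whole range can be
 * accessed as flat arrays. Print a detailed report and return false otherwise.
 */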
bool kmsan_metadata_is_contiguous(void *addr, size_t size)
{
        char *cur_shadow = NULL, *next_shadow = NULL, *cur_origin = NULL,
             *next_origin = NULL;
        u64 cur_addr = (u64)addr, next_addr = cur_addr + PAGE_SIZE;
        depot_stack_handle_t *origin_p;
        bool all_untracked = false;

        if (!size)
                return true;

        /* The whole range belongs to the same page. */
        if (ALIGN_DOWN(cur_addr + size - 1, PAGE_SIZE) ==
            ALIGN_DOWN(cur_addr, PAGE_SIZE))
                return true;

        cur_shadow = kmsan_get_metadata((void *)cur_addr, /*is_origin*/ false);
        if (!cur_shadow)
                all_untracked = true;
        cur_origin = kmsan_get_metadata((void *)cur_addr, /*is_origin*/ true);
        if (all_untracked && cur_origin)
                goto report;

        for (; next_addr < (u64)addr + size;
             cur_addr = next_addr, cur_shadow = next_shadow,
             cur_origin = next_origin, next_addr += PAGE_SIZE) {
                next_shadow = kmsan_get_metadata((void *)next_addr, false);
                next_origin = kmsan_get_metadata((void *)next_addr, true);
                if (all_untracked) {
                        if (next_shadow || next_origin)
                                goto report;
                        if (!next_shadow && !next_origin)
                                continue;
                }
                if (((u64)cur_shadow == ((u64)next_shadow - PAGE_SIZE)) &&
                    ((u64)cur_origin == ((u64)next_origin - PAGE_SIZE)))
                        continue;
                goto report;
        }
        return true;

report:
        pr_err("%s: attempting to access two shadow page ranges.\n", __func__);
        pr_err("Access of size %ld at %px.\n", size, addr);
        pr_err("Addresses belonging to different ranges: %px and %px\n",
               (void *)cur_addr, (void *)next_addr);
        pr_err("page[0].shadow: %px, page[1].shadow: %px\n", cur_shadow,
               next_shadow);
        pr_err("page[0].origin: %px, page[1].origin: %px\n", cur_origin,
               next_origin);
        origin_p = kmsan_get_metadata(addr, KMSAN_META_ORIGIN);
        if (origin_p) {
                pr_err("Origin: %08x\n", *origin_p);
                kmsan_print_origin(*origin_p);
        } else {
                pr_err("Origin: unavailable\n");
        }
        return false;
}