// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

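/*
 * Under CONFIG_KMSAN every struct page carries two extra fields, kmsan_shadow
 * and kmsan_origin, pointing to the metadata pages for that page: the shadow
 * page records which bits of the data are initialized, and the origin page
 * records stack depot handles describing where uninitialized bits came from.
 * The macros below are shorthands for those fields.
 */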
#define shadow_page_for(page) ((page)->kmsan_shadow)
#define origin_page_for(page) ((page)->kmsan_origin)

static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns a
 * zero, and every store doesn't affect other loads.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

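/*
 * For vmalloc and module addresses the metadata lives in dedicated virtual
 * regions: the shadow/origin byte for an address sits at the same offset
 * within the corresponding metadata region. vmalloc_meta() performs that
 * translation and returns 0 for addresses it does not cover.
 */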
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}

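/*
 * Return the struct page backing a directly mapped kernel virtual address,
 * or NULL if the address is not a valid lowmem address.
 */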
static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}

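/*
 * Return pointers to the shadow and origin covering an access of @size bytes
 * at @address, so that the caller (typically one of the KMSAN instrumentation
 * hooks) can read or update the metadata directly. If KMSAN is disabled or no
 * metadata exists for the range, the access is redirected to the dummy pages
 * above: stores go to memory nobody reads, loads observe zero shadow.
 */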
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}

/*
 * Obtain the shadow or origin pointer for the given address, or NULL if there's
 * none. The caller must check the return value for being non-NULL if needed.
 * The return value of this function should not depend on whether we're in the
 * runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, off;
	struct page *page;
	void *ret;

	if (is_origin)
		addr = ALIGN_DOWN(addr, KMSAN_ORIGIN_SIZE);
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = offset_in_page(addr);

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}

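/*
 * Copy both shadow and origin from @src to @dst, so that the copy of a page
 * is considered exactly as initialized as the original. If the source has no
 * metadata, the destination is simply marked as fully initialized.
 */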
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);

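/*
 * Set up metadata for a newly allocated block of 2^order pages: allocations
 * that are zero-initialized (or made with KMSAN disabled) get clean shadow,
 * everything else gets its shadow filled with 0xff (uninitialized) and its
 * origin filled with a stack depot handle recording the allocation stack.
 */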
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}

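/*
 * Mark the contents of a freed page block as uninitialized, so that later
 * reads of the stale data are reported by KMSAN.
 */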
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page),
				     page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

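/*
 * Map the shadow and origin counterparts of @pages into the KMSAN metadata
 * regions mirroring the vmalloc range [@start, @end), so that vmapped memory
 * has metadata backing it.
 */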
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped, err = 0;

	if (!kmsan_enabled)
		return 0;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return 0;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages) {
		err = -ENOMEM;
		goto ret;
	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	kmsan_leave_runtime();
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
	return err;
}

/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)PAGE_ALIGN_DOWN((u64)start);
	size = PAGE_ALIGN((u64)end - (u64)start);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);

	if (!shadow || !origin)
		panic("%s: Failed to allocate metadata memory for early boot range of size %llu",
		      __func__, size);

	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}

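/*
 * Attach pre-allocated shadow and origin page blocks to the 2^order data
 * pages starting at @page, one metadata page per data page.
 */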
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}