// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE reporting.
 *
 * Copyright (C) 2020, Google LLC.
 */

#include <linux/stdarg.h>

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
#include <linux/sprintf.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/sched/clock.h>
#include <trace/events/error_report.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif

/* Helper function to either print to a seq_file or to console. */
__printf(2, 3)
static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq)
		seq_vprintf(seq, fmt, args);
	else
		vprintk(fmt, args);
	va_end(args);
}

/*
 * Get the number of stack entries to skip to get out of MM internals. @type is
 * optional, and if set to NULL, assumes an allocation or free stack.
 */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries,
			    const enum kfence_error_type *type)
{
	char buf[64];
	int skipnr, fallback = 0;

	if (type) {
		/* Depending on error type, find different stack entries. */
		switch (*type) {
		case KFENCE_ERROR_UAF:
		case KFENCE_ERROR_OOB:
		case KFENCE_ERROR_INVALID:
			/*
			 * kfence_handle_page_fault() may be called with pt_regs
			 * set to NULL; in that case we'll simply show the full
			 * stack trace.
			 */
			return 0;
		case KFENCE_ERROR_CORRUPTION:
		case KFENCE_ERROR_INVALID_FREE:
			break;
		}
	}

	for (skipnr = 0; skipnr < num_entries; skipnr++) {
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);

		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
		    !strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
			/*
			 * In case of tail calls from any of the below to any of
			 * the above, optimized by the compiler such that the
			 * stack trace would omit the initial entry point below.
			 */
			fallback = skipnr + 1;
		}

		/*
		 * The below list should only include the initial entry points
		 * into the slab allocators. Includes the *_bulk() variants by
		 * checking prefixes.
		 */
		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
			goto found;
	}
	if (fallback < num_entries)
		return fallback;
found:
	skipnr++;
	return skipnr < num_entries ? skipnr : 0;
}
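
/*
 * Print the allocation or free stack recorded in @meta (selected by
 * @show_alloc) to @seq, or to the console if @seq is NULL.
 */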
static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
			       bool show_alloc)
{
	const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
	u64 ts_sec = track->ts_nsec;
	unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);
	u64 interval_nsec = local_clock() - track->ts_nsec;
	unsigned long rem_interval_nsec = do_div(interval_nsec, NSEC_PER_SEC);

	/* Timestamp matches printk timestamp format. */
	seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus (%lu.%06lus ago):\n",
		       show_alloc ? "allocated" : meta->state == KFENCE_OBJECT_RCU_FREEING ?
		       "rcu freeing" : "freed", track->pid,
		       track->cpu, (unsigned long)ts_sec, rem_nsec / 1000,
		       (unsigned long)interval_nsec, rem_interval_nsec / 1000);

	if (track->num_stack_entries) {
		/* Skip allocation/free internals stack. */
		int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);

		/* stack_trace_seq_print() does not exist; open code our own. */
		for (; i < track->num_stack_entries; i++)
			seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
	} else {
		seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
	}
}
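
/*
 * Print a summary of the object described by @meta: its index, address range,
 * size, and cache, followed by its allocation (and, if freed, free) stack.
 */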
void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta)
{
	const int size = abs(meta->size);
	const unsigned long start = meta->addr;
	const struct kmem_cache *const cache = meta->cache;

	lockdep_assert_held(&meta->lock);

	if (meta->state == KFENCE_OBJECT_UNUSED) {
		seq_con_printf(seq, "kfence-#%td unused\n", meta - kfence_metadata);
		return;
	}

	seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
		       meta - kfence_metadata, (void *)start, (void *)(start + size - 1),
		       size, (cache && cache->name) ? cache->name : "<destroyed>");

	kfence_print_stack(seq, meta, true);

	if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) {
		seq_con_printf(seq, "\n");
		kfence_print_stack(seq, meta, false);
	}
}

/*
 * Show bytes at @addr that are different from the expected canary values, up to
 * @bytes_to_show.
 */
static void print_diff_canary(unsigned long address, size_t bytes_to_show,
			      const struct kfence_metadata *meta)
{
	const unsigned long show_until_addr = address + bytes_to_show;
	const u8 *cur, *end;

	/* Do not show contents of object nor read into following guard page. */
	end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
						: min(show_until_addr, PAGE_ALIGN(address)));

	pr_cont("[");
	for (cur = (const u8 *)address; cur < end; cur++) {
		if (*cur == KFENCE_CANARY_PATTERN_U8(cur))
			pr_cont(" .");
		else if (no_hash_pointers)
			pr_cont(" 0x%02x", *cur);
		else /* Do not leak kernel memory in non-debug builds. */
			pr_cont(" !");
	}
	pr_cont(" ]");
}

static const char *get_access_type(bool is_write)
{
	return is_write ? "write" : "read";
}
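
/*
 * Generate a KFENCE error report for @address. @regs, if non-NULL, provides
 * the stack at the fault; otherwise the current stack is captured. Expects
 * @meta->lock to be held if @meta is non-NULL.
 */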
void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
	const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
	int num_stack_entries;
	int skipnr = 0;

	if (regs) {
		num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
	} else {
		num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
		skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
	}

	/* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
	if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
		return;

	if (meta)
		lockdep_assert_held(&meta->lock);
	/*
	 * Because we may generate reports in printk-unfriendly parts of the
	 * kernel, such as scheduler code, the use of printk() could deadlock.
	 * Until such time that all printing code here is safe in all parts of
	 * the kernel, accept the risk, and just get our message out (given the
	 * system might already behave unpredictably due to the memory error).
	 * As such, also disable lockdep to hide warnings, and avoid disabling
	 * lockdep for the rest of the kernel.
	 */
	lockdep_off();

	pr_err("==================================================================\n");
	/* Print report header. */
	switch (type) {
	case KFENCE_ERROR_OOB: {
		const bool left_of_object = address < meta->addr;

		pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Out-of-bounds %s at 0x%p (%luB %s of kfence-#%td):\n",
		       get_access_type(is_write), (void *)address,
		       left_of_object ? meta->addr - address : address - meta->addr,
		       left_of_object ? "left" : "right", object_index);
		break;
	}
	case KFENCE_ERROR_UAF:
		pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Use-after-free %s at 0x%p (in kfence-#%td):\n",
		       get_access_type(is_write), (void *)address, object_index);
		break;
	case KFENCE_ERROR_CORRUPTION:
		pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Corrupted memory at 0x%p ", (void *)address);
		print_diff_canary(address, 16, meta);
		pr_cont(" (in kfence-#%td):\n", object_index);
		break;
	case KFENCE_ERROR_INVALID:
		pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Invalid %s at 0x%p:\n", get_access_type(is_write),
		       (void *)address);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Invalid free of 0x%p (in kfence-#%td):\n", (void *)address,
		       object_index);
		break;
	}

	/* Print stack trace and object info. */
	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);

	if (meta) {
		pr_err("\n");
		kfence_print_object(NULL, meta);
	}

	/* Print report footer. */
	pr_err("\n");
	if (no_hash_pointers && regs)
		show_regs(regs);
	else
		dump_stack_print_info(KERN_ERR);
	trace_error_report_end(ERROR_DETECTOR_KFENCE, address);
	pr_err("==================================================================\n");

	lockdep_on();

	check_panic_on_warn("KFENCE");

	/* We encountered a memory safety error, taint the kernel! */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}
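
/*
 * Copy a KFENCE stack trace into a kmem_obj_info stack array, skipping
 * allocator-internal entries and NULL-terminating if fewer than
 * KS_ADDRS_COUNT entries are copied.
 */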
static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
{
	int i, j;

	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
		kp_stack[j] = (void *)track->stack_entries[i];
	if (j < KS_ADDRS_COUNT)
		kp_stack[j] = NULL;
}
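
/*
 * Fill in the kmem_obj_info @kpp for a KFENCE object. Returns false if
 * @object is not a KFENCE allocation, true otherwise.
 */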
bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
	unsigned long flags;

	if (!meta)
		return false;

	/*
	 * If state is UNUSED at least show the pointer requested; the rest
	 * would be garbage data.
	 */
	kpp->kp_ptr = object;

	/* Requesting info on a never-used object is almost certainly a bug. */
	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
		return true;

	raw_spin_lock_irqsave(&meta->lock, flags);

	kpp->kp_slab = slab;
	kpp->kp_slab_cache = meta->cache;
	kpp->kp_objp = (void *)meta->addr;
	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
	if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING)
		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
	/* get_stack_skipnr() ensures the first entry is outside allocator. */
	kpp->kp_ret = kpp->kp_stack[0];

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return true;
}