lib/bug.c
// SPDX-License-Identifier: GPL-2.0
/*
  Generic support for BUG()

  This respects the following config options:

  CONFIG_BUG - emit BUG traps.  Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
	the containing struct bug_entry for bug_addr and file.
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG

  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
  (though they're generally always on).

  CONFIG_GENERIC_BUG is set by each architecture using this code.

  To use this, your architecture must:

  1. Set up the config options:
     - Enable CONFIG_GENERIC_BUG if CONFIG_BUG

  2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
     - Define HAVE_ARCH_BUG
     - Implement BUG() to generate a faulting instruction
     - NOTE: struct bug_entry does not have "file" or "line" entries
       when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
       the values accordingly.

  3. Implement the trap
     - In the illegal instruction trap handler (typically), verify
       that the fault was in kernel mode, and call report_bug()
     - report_bug() will return whether it was a false alarm, a warning,
       or an actual bug.
     - You must implement the is_valid_bugaddr(bugaddr) callback which
       returns true if the eip is a real kernel address, and it points
       to the expected BUG trap instruction.

    Jeremy Fitzhardinge <jeremy@goop.org> 2006
 */
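/*
 * Illustrative sketch only, not taken from any particular architecture:
 * roughly what the per-arch side of steps 2 and 3 above can look like.
 * The names my_handle_bug(), my_die() and MY_BUG_INSN_LEN, and the use of
 * kernel_text_address(), instruction_pointer() and
 * instruction_pointer_set(), are assumptions made for the example; the
 * real hookup lives under arch/ and varies per architecture.
 *
 *	int is_valid_bugaddr(unsigned long addr)
 *	{
 *		// Must point at kernel text; many architectures also re-read
 *		// the instruction and check for their specific BUG trap opcode.
 *		return kernel_text_address(addr);
 *	}
 *
 *	// Called from the illegal-instruction trap handler, once the handler
 *	// has established that the fault happened in kernel mode.
 *	static void my_handle_bug(struct pt_regs *regs)
 *	{
 *		switch (report_bug(instruction_pointer(regs), regs)) {
 *		case BUG_TRAP_TYPE_WARN:
 *			// A WARN*: skip the trapping instruction and resume.
 *			instruction_pointer_set(regs,
 *				instruction_pointer(regs) + MY_BUG_INSN_LEN);
 *			break;
 *		case BUG_TRAP_TYPE_BUG:
 *			my_die("kernel BUG", regs);	// does not return
 *			break;
 *		default:
 *			// BUG_TRAP_TYPE_NONE: not ours, treat as a real fault.
 *			break;
 *		}
 *	}
 */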
#define pr_fmt(fmt) fmt

#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>

extern struct bug_entry __start___bug_table[], __stop___bug_table[];
static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	return bug->bug_addr;
#else
	return (unsigned long)bug + bug->bug_addr_disp;
#endif
}
#ifdef CONFIG_MODULES
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);
static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	struct module *mod;
	struct bug_entry *bug = NULL;

	rcu_read_lock_sched();
	list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
		unsigned i;

		bug = mod->bug_table;
		for (i = 0; i < mod->num_bugs; ++i, ++bug)
			if (bugaddr == bug_addr(bug))
				goto out;
	}
	bug = NULL;
out:
	rcu_read_unlock_sched();

	return bug;
}
void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			 struct module *mod)
{
	char *secstrings;
	unsigned int i;

	lockdep_assert_held(&module_mutex);

	mod->bug_table = NULL;
	mod->num_bugs = 0;

	/* Find the __bug_table section, if present */
	secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (i = 1; i < hdr->e_shnum; i++) {
		if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
			continue;
		mod->bug_table = (void *) sechdrs[i].sh_addr;
		mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
		break;
	}

	/*
	 * Strictly speaking this should have a spinlock to protect against
	 * traversals, but since we only traverse on BUG()s, a spinlock
	 * could potentially lead to deadlock and thus be counter-productive.
	 * Thus, this uses RCU to safely manipulate the bug list, since BUG
	 * must run in non-interruptive state.
	 */
	list_add_rcu(&mod->bug_list, &module_bug_list);
}

void module_bug_cleanup(struct module *mod)
{
	lockdep_assert_held(&module_mutex);
	list_del_rcu(&mod->bug_list);
}
#else

static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
{
	return NULL;
}
#endif
struct bug_entry *find_bug(unsigned long bugaddr)
{
	struct bug_entry *bug;

	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
		if (bugaddr == bug_addr(bug))
			return bug;

	return module_find_bug(bugaddr);
}
enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
	struct bug_entry *bug;
	const char *file;
	unsigned line, warning, once, done;

	if (!is_valid_bugaddr(bugaddr))
		return BUG_TRAP_TYPE_NONE;

	bug = find_bug(bugaddr);
	if (!bug)
		return BUG_TRAP_TYPE_NONE;

	file = NULL;
	line = 0;
	warning = 0;

	if (bug) {
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
		file = bug->file;
#else
		file = (const char *)bug + bug->file_disp;
#endif
		line = bug->line;
#endif
		warning = (bug->flags & BUGFLAG_WARNING) != 0;
		once = (bug->flags & BUGFLAG_ONCE) != 0;
		done = (bug->flags & BUGFLAG_DONE) != 0;

		if (warning && once) {
			if (done)
				return BUG_TRAP_TYPE_WARN;

			/*
			 * Since this is the only store, concurrency is not an issue.
			 */
			bug->flags |= BUGFLAG_DONE;
		}
	}

	if (warning) {
		/* this is a WARN_ON rather than BUG/BUG_ON */
		__warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
		       NULL);
		return BUG_TRAP_TYPE_WARN;
	}

	printk(KERN_DEFAULT CUT_HERE);

	if (file)
		pr_crit("kernel BUG at %s:%u!\n", file, line);
	else
		pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
			(void *)bugaddr);

	return BUG_TRAP_TYPE_BUG;
}
static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
	struct bug_entry *bug;

	for (bug = start; bug < end; bug++)
		bug->flags &= ~BUGFLAG_DONE;
}
void generic_bug_clear_once(void)
{
#ifdef CONFIG_MODULES
	struct module *mod;

	rcu_read_lock_sched();
	list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
		clear_once_table(mod->bug_table,
				 mod->bug_table + mod->num_bugs);
	rcu_read_unlock_sched();
#endif

	clear_once_table(__start___bug_table, __stop___bug_table);
}