bug.c (6016B)
// SPDX-License-Identifier: GPL-2.0
/*
  Generic support for BUG()

  This respects the following config options:

  CONFIG_BUG - emit BUG traps.  Nothing happens without this.
  CONFIG_GENERIC_BUG - enable this code.
  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit relative pointers for bug_addr and file
  CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG

  CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
  (though they're generally always on).

  CONFIG_GENERIC_BUG is set by each architecture using this code.

  To use this, your architecture must:

  1. Set up the config options:
     - Enable CONFIG_GENERIC_BUG if CONFIG_BUG

  2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
     - Define HAVE_ARCH_BUG
     - Implement BUG() to generate a faulting instruction
     - NOTE: struct bug_entry does not have "file" or "line" entries
       when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
       the values accordingly.

  3. Implement the trap
     - In the illegal instruction trap handler (typically), verify
       that the fault was in kernel mode, and call report_bug()
     - report_bug() will return whether it was a false alarm, a warning,
       or an actual bug.
     - You must implement the is_valid_bugaddr(bugaddr) callback which
       returns true if the eip is a real kernel address, and it points
       to the expected BUG trap instruction.

    Jeremy Fitzhardinge <jeremy@goop.org> 2006
 */

#define pr_fmt(fmt) fmt

#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/ftrace.h>

extern struct bug_entry __start___bug_table[], __stop___bug_table[];

static inline unsigned long bug_addr(const struct bug_entry *bug)
{
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        return (unsigned long)&bug->bug_addr_disp + bug->bug_addr_disp;
#else
        return bug->bug_addr;
#endif
}

#ifdef CONFIG_MODULES
/* Updates are protected by module mutex */
static LIST_HEAD(module_bug_list);

static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
        struct module *mod;
        struct bug_entry *bug = NULL;

        rcu_read_lock_sched();
        list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
                unsigned i;

                bug = mod->bug_table;
                for (i = 0; i < mod->num_bugs; ++i, ++bug)
                        if (bugaddr == bug_addr(bug))
                                goto out;
        }
        bug = NULL;
out:
        rcu_read_unlock_sched();

        return bug;
}

void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
                         struct module *mod)
{
        char *secstrings;
        unsigned int i;

        mod->bug_table = NULL;
        mod->num_bugs = 0;

        /* Find the __bug_table section, if present */
        secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
        for (i = 1; i < hdr->e_shnum; i++) {
                if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
                        continue;
                mod->bug_table = (void *) sechdrs[i].sh_addr;
                mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
                break;
        }

        /*
         * Strictly speaking this should have a spinlock to protect against
         * traversals, but since we only traverse on BUG()s, a spinlock
         * could potentially lead to deadlock and thus be counter-productive.
         * Thus, this uses RCU to safely manipulate the bug list, since BUG
         * must run in non-interruptive state.
         */
        list_add_rcu(&mod->bug_list, &module_bug_list);
}

void module_bug_cleanup(struct module *mod)
{
        list_del_rcu(&mod->bug_list);
}

#else

static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
{
        return NULL;
}
#endif

void bug_get_file_line(struct bug_entry *bug, const char **file,
                       unsigned int *line)
{
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
        *file = (const char *)&bug->file_disp + bug->file_disp;
#else
        *file = bug->file;
#endif
        *line = bug->line;
#else
        *file = NULL;
        *line = 0;
#endif
}

struct bug_entry *find_bug(unsigned long bugaddr)
{
        struct bug_entry *bug;

        for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
                if (bugaddr == bug_addr(bug))
                        return bug;

        return module_find_bug(bugaddr);
}

enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
        struct bug_entry *bug;
        const char *file;
        unsigned line, warning, once, done;

        if (!is_valid_bugaddr(bugaddr))
                return BUG_TRAP_TYPE_NONE;

        bug = find_bug(bugaddr);
        if (!bug)
                return BUG_TRAP_TYPE_NONE;

        disable_trace_on_warning();

        bug_get_file_line(bug, &file, &line);

        warning = (bug->flags & BUGFLAG_WARNING) != 0;
        once = (bug->flags & BUGFLAG_ONCE) != 0;
        done = (bug->flags & BUGFLAG_DONE) != 0;

        if (warning && once) {
                if (done)
                        return BUG_TRAP_TYPE_WARN;

                /*
                 * Since this is the only store, concurrency is not an issue.
                 */
                bug->flags |= BUGFLAG_DONE;
        }

        /*
         * BUG() and WARN_ON() families don't print a custom debug message
         * before triggering the exception handler, so we must add the
         * "cut here" line now. WARN() issues its own "cut here" before the
         * extra debugging message it writes before triggering the handler.
         */
        if ((bug->flags & BUGFLAG_NO_CUT_HERE) == 0)
                printk(KERN_DEFAULT CUT_HERE);

        if (warning) {
                /* this is a WARN_ON rather than BUG/BUG_ON */
                __warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs,
                       NULL);
                return BUG_TRAP_TYPE_WARN;
        }

        if (file)
                pr_crit("kernel BUG at %s:%u!\n", file, line);
        else
                pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
                        (void *)bugaddr);

        return BUG_TRAP_TYPE_BUG;
}

static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
        struct bug_entry *bug;

        for (bug = start; bug < end; bug++)
                bug->flags &= ~BUGFLAG_DONE;
}

void generic_bug_clear_once(void)
{
#ifdef CONFIG_MODULES
        struct module *mod;

        rcu_read_lock_sched();
        list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
                clear_once_table(mod->bug_table,
                                 mod->bug_table + mod->num_bugs);
        rcu_read_unlock_sched();
#endif

        clear_once_table(__start___bug_table, __stop___bug_table);
}
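
The header comment lists what an architecture has to provide, but the consuming side of the API is not shown anywhere in this file. Below is a minimal, hypothetical sketch of that hookup, kept separate from the listing above: an is_valid_bugaddr() implementation plus an illegal-instruction handler that feeds report_bug() and acts on its return value. ARCH_BUG_OPCODE, ARCH_BUG_INSN_LEN and arch_handle_bug() are illustrative placeholders rather than real kernel symbols; instruction_pointer()/instruction_pointer_set() stand in for whatever a given port uses to read and advance the faulting PC; and a real port would take its own die()/oops path where this sketch simply calls panic().

/*
 * Illustrative only -- not part of lib/bug.c. A hypothetical architecture
 * wiring BUG handling into its illegal-instruction trap, per steps 2-3 of
 * the header comment above.
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>

#define ARCH_BUG_OPCODE         0x0000  /* placeholder: the BUG trap opcode */
#define ARCH_BUG_INSN_LEN       2       /* placeholder: size of that opcode */

/* Callback used by report_bug(): does addr hold our BUG trap instruction? */
int is_valid_bugaddr(unsigned long addr)
{
        unsigned short insn;

        /* Refuse anything we cannot safely read as kernel text. */
        if (get_kernel_nofault(insn, (unsigned short *)addr))
                return 0;

        return insn == ARCH_BUG_OPCODE;
}

/* Called from the illegal-instruction trap once a kernel-mode fault is seen. */
static void arch_handle_bug(struct pt_regs *regs)
{
        switch (report_bug(instruction_pointer(regs), regs)) {
        case BUG_TRAP_TYPE_WARN:
                /* A WARN*(): step over the trapping instruction and resume. */
                instruction_pointer_set(regs,
                                instruction_pointer(regs) + ARCH_BUG_INSN_LEN);
                break;
        case BUG_TRAP_TYPE_BUG:
                /* A real BUG(): must not return; real ports use their die()/oops path. */
                panic("fatal BUG at %px", (void *)instruction_pointer(regs));
                break;
        case BUG_TRAP_TYPE_NONE:
                /* Not one of ours: fall back to normal illegal-instruction handling. */
                break;
        }
}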