debug-monitors.c (10835B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 single-step debug support and mdscr context switching.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/stat.h>
#include <linux/uaccess.h>
#include <linux/sched/task_stack.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/traps.h>

/* Determine debug architecture. */
u8 debug_monitors_arch(void)
{
	return cpuid_feature_extract_unsigned_field(read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1),
						    ID_AA64DFR0_DEBUGVER_SHIFT);
}

/*
 * MDSCR access routines.
 */
static void mdscr_write(u32 mdscr)
{
	unsigned long flags;

	flags = local_daif_save();
	write_sysreg(mdscr, mdscr_el1);
	local_daif_restore(flags);
}
NOKPROBE_SYMBOL(mdscr_write);

static u32 mdscr_read(void)
{
	return read_sysreg(mdscr_el1);
}
NOKPROBE_SYMBOL(mdscr_read);

/*
 * Allow root to disable self-hosted debug from userspace.
 * This is useful if you want to connect an external JTAG debugger.
 */
static bool debug_enabled = true;

static int create_debug_debugfs_entry(void)
{
	debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
	return 0;
}
fs_initcall(create_debug_debugfs_entry);

static int __init early_debug_disable(char *buf)
{
	debug_enabled = false;
	return 0;
}

early_param("nodebugmon", early_debug_disable);

/*
 * Keep track of debug users on each core.
 * The ref counts are per-cpu ints, only accessed with preemption disabled.
 */
static DEFINE_PER_CPU(int, mde_ref_count);
static DEFINE_PER_CPU(int, kde_ref_count);

void enable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, enable = 0;

	WARN_ON(preemptible());

	if (this_cpu_inc_return(mde_ref_count) == 1)
		enable = DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_inc_return(kde_ref_count) == 1)
		enable |= DBG_MDSCR_KDE;

	if (enable && debug_enabled) {
		mdscr = mdscr_read();
		mdscr |= enable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(enable_debug_monitors);

void disable_debug_monitors(enum dbg_active_el el)
{
	u32 mdscr, disable = 0;

	WARN_ON(preemptible());

	if (this_cpu_dec_return(mde_ref_count) == 0)
		disable = ~DBG_MDSCR_MDE;

	if (el == DBG_ACTIVE_EL1 &&
	    this_cpu_dec_return(kde_ref_count) == 0)
		disable &= ~DBG_MDSCR_KDE;

	if (disable) {
		mdscr = mdscr_read();
		mdscr &= disable;
		mdscr_write(mdscr);
	}
}
NOKPROBE_SYMBOL(disable_debug_monitors);

/*
 * OS lock clearing.
 */
static int clear_os_lock(unsigned int cpu)
{
	write_sysreg(0, osdlr_el1);
	write_sysreg(0, oslar_el1);
	isb();
	return 0;
}

static int __init debug_monitors_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
				 "arm64/debug_monitors:starting",
				 clear_os_lock, NULL);
}
postcore_initcall(debug_monitors_init);
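
/*
 * Illustrative sketch (not part of this file): enable_debug_monitors() and
 * disable_debug_monitors() maintain per-CPU refcounts and WARN if called
 * preemptibly, so a caller is expected to pin itself to the current CPU
 * first. A minimal usage pattern might look like the following; the
 * function name is hypothetical.
 */
#if 0
static void example_enable_el1_debug(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* First user on this CPU sets MDSCR_EL1.MDE (and .KDE for EL1). */
	enable_debug_monitors(DBG_ACTIVE_EL1);
	/* ... program breakpoint/watchpoint or single-step state ... */
	/* Last user on this CPU clears the bits again. */
	disable_debug_monitors(DBG_ACTIVE_EL1);
	local_irq_restore(flags);
}
#endif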

/*
 * Single step API and exception handling.
 */
static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
{
	regs->pstate |= DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(set_user_regs_spsr_ss);

static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
{
	regs->pstate &= ~DBG_SPSR_SS;
}
NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);

#define set_regs_spsr_ss(r)	set_user_regs_spsr_ss(&(r)->user_regs)
#define clear_regs_spsr_ss(r)	clear_user_regs_spsr_ss(&(r)->user_regs)

static DEFINE_SPINLOCK(debug_hook_lock);
static LIST_HEAD(user_step_hook);
static LIST_HEAD(kernel_step_hook);

static void register_debug_hook(struct list_head *node, struct list_head *list)
{
	spin_lock(&debug_hook_lock);
	list_add_rcu(node, list);
	spin_unlock(&debug_hook_lock);
}

static void unregister_debug_hook(struct list_head *node)
{
	spin_lock(&debug_hook_lock);
	list_del_rcu(node);
	spin_unlock(&debug_hook_lock);
	synchronize_rcu();
}

void register_user_step_hook(struct step_hook *hook)
{
	register_debug_hook(&hook->node, &user_step_hook);
}

void unregister_user_step_hook(struct step_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

void register_kernel_step_hook(struct step_hook *hook)
{
	register_debug_hook(&hook->node, &kernel_step_hook);
}

void unregister_kernel_step_hook(struct step_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

/*
 * Call the registered single-step handlers.
 * There is no syndrome info to determine which handler should run, so call
 * each registered handler in turn until one returns DBG_HOOK_HANDLED.
 */
static int call_step_hook(struct pt_regs *regs, unsigned long esr)
{
	struct step_hook *hook;
	struct list_head *list;
	int retval = DBG_HOOK_ERROR;

	list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;

	/*
	 * The single-step exception is taken with interrupts disabled, so
	 * this function cannot be preempted and the RCU list walk is safe.
	 */
	list_for_each_entry_rcu(hook, list, node) {
		retval = hook->fn(regs, esr);
		if (retval == DBG_HOOK_HANDLED)
			break;
	}

	return retval;
}
NOKPROBE_SYMBOL(call_step_hook);

static void send_user_sigtrap(int si_code)
{
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	if (interrupts_enabled(regs))
		local_irq_enable();

	arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs),
			      "User debug trap");
}
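
/*
 * Illustrative sketch (not part of this file): an in-kernel consumer of
 * single-step exceptions registers a step_hook whose ->fn returns
 * DBG_HOOK_HANDLED when the exception was its own. The handler and its
 * pending-flag bookkeeping below are hypothetical.
 */
#if 0
static bool example_step_pending;	/* hypothetical per-example state */

static int example_step_fn(struct pt_regs *regs, unsigned long esr)
{
	if (!example_step_pending)
		return DBG_HOOK_ERROR;	/* not ours; let other hooks try */

	example_step_pending = false;
	/* ... finish handling the stepped instruction ... */
	return DBG_HOOK_HANDLED;
}

static struct step_hook example_step_hook = {
	.fn = example_step_fn,
};

/* register_kernel_step_hook(&example_step_hook); at init time */
#endif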

static int single_step_handler(unsigned long unused, unsigned long esr,
			       struct pt_regs *regs)
{
	bool handler_found = false;

	/*
	 * If we are stepping a pending breakpoint, call the hw_breakpoint
	 * handler first.
	 */
	if (!reinstall_suspended_bps(regs))
		return 0;

	if (!handler_found && call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
		handler_found = true;

	if (!handler_found && user_mode(regs)) {
		send_user_sigtrap(TRAP_TRACE);

		/*
		 * ptrace will disable single step unless explicitly
		 * asked to re-enable it. For other clients, it makes
		 * sense to leave it enabled (i.e. rewind the controls
		 * to the active-not-pending state).
		 */
		user_rewind_single_step(current);
	} else if (!handler_found) {
		pr_warn("Unexpected kernel single-step exception at EL1\n");
		/*
		 * Re-enable stepping since we know that we will be
		 * returning to regs.
		 */
		set_regs_spsr_ss(regs);
	}

	return 0;
}
NOKPROBE_SYMBOL(single_step_handler);

static LIST_HEAD(user_break_hook);
static LIST_HEAD(kernel_break_hook);

void register_user_break_hook(struct break_hook *hook)
{
	register_debug_hook(&hook->node, &user_break_hook);
}

void unregister_user_break_hook(struct break_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

void register_kernel_break_hook(struct break_hook *hook)
{
	register_debug_hook(&hook->node, &kernel_break_hook);
}

void unregister_kernel_break_hook(struct break_hook *hook)
{
	unregister_debug_hook(&hook->node);
}

static int call_break_hook(struct pt_regs *regs, unsigned long esr)
{
	struct break_hook *hook;
	struct list_head *list;
	int (*fn)(struct pt_regs *regs, unsigned long esr) = NULL;

	list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;

	/*
	 * The BRK exception is taken with interrupts disabled, so this
	 * function cannot be preempted and the RCU list walk is safe.
	 */
	list_for_each_entry_rcu(hook, list, node) {
		unsigned long comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

		if ((comment & ~hook->mask) == hook->imm)
			fn = hook->fn;
	}

	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
NOKPROBE_SYMBOL(call_break_hook);

static int brk_handler(unsigned long unused, unsigned long esr,
		       struct pt_regs *regs)
{
	if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
		return 0;

	if (user_mode(regs)) {
		send_user_sigtrap(TRAP_BRKPT);
	} else {
		pr_warn("Unexpected kernel BRK exception at EL1\n");
		return -EFAULT;
	}

	return 0;
}
NOKPROBE_SYMBOL(brk_handler);

int aarch32_break_handler(struct pt_regs *regs)
{
	u32 arm_instr;
	u16 thumb_instr;
	bool bp = false;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!compat_user_mode(regs))
		return -EFAULT;

	if (compat_thumb_mode(regs)) {
		/* get 16-bit Thumb instruction */
		__le16 instr;
		get_user(instr, (__le16 __user *)pc);
		thumb_instr = le16_to_cpu(instr);
		if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
			/* get second half of 32-bit Thumb-2 instruction */
			get_user(instr, (__le16 __user *)(pc + 2));
			thumb_instr = le16_to_cpu(instr);
			bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
		} else {
			bp = thumb_instr == AARCH32_BREAK_THUMB;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr;
		get_user(instr, (__le32 __user *)pc);
		arm_instr = le32_to_cpu(instr);
		bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
	}

	if (!bp)
		return -EFAULT;

	send_user_sigtrap(TRAP_BRKPT);
	return 0;
}
NOKPROBE_SYMBOL(aarch32_break_handler);

void __init debug_traps_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
			      TRAP_TRACE, "single-step handler");
	hook_debug_fault_code(DBG_ESR_EVT_BRK, brk_handler, SIGTRAP,
			      TRAP_BRKPT, "BRK handler");
}
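
/*
 * Illustrative sketch (not part of this file): kernel BRK users register a
 * break_hook keyed on the BRK comment immediate, which call_break_hook()
 * matches under ->mask (a mask of 0 requires an exact match). The immediate
 * value and handler below are hypothetical.
 */
#if 0
#define EXAMPLE_BRK_IMM	0x7ff		/* hypothetical comment immediate */

static int example_brk_fn(struct pt_regs *regs, unsigned long esr)
{
	/* ... decode and handle the trapping BRK #EXAMPLE_BRK_IMM ... */
	return DBG_HOOK_HANDLED;
}

static struct break_hook example_break_hook = {
	.fn	= example_brk_fn,
	.imm	= EXAMPLE_BRK_IMM,
};

/* register_kernel_break_hook(&example_break_hook); at init time */
#endif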

/* Re-enable single step for syscall restarting. */
void user_rewind_single_step(struct task_struct *task)
{
	/*
	 * If single step is active for this thread, then set SPSR.SS
	 * to 1 to avoid returning to the active-pending state.
	 */
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);

void user_fastforward_single_step(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		clear_regs_spsr_ss(task_pt_regs(task));
}

void user_regs_reset_single_step(struct user_pt_regs *regs,
				 struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
		set_user_regs_spsr_ss(regs);
	else
		clear_user_regs_spsr_ss(regs);
}

/* Kernel API */
void kernel_enable_single_step(struct pt_regs *regs)
{
	WARN_ON(!irqs_disabled());
	set_regs_spsr_ss(regs);
	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
	enable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_enable_single_step);

void kernel_disable_single_step(void)
{
	WARN_ON(!irqs_disabled());
	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
	disable_debug_monitors(DBG_ACTIVE_EL1);
}
NOKPROBE_SYMBOL(kernel_disable_single_step);

int kernel_active_single_step(void)
{
	WARN_ON(!irqs_disabled());
	return mdscr_read() & DBG_MDSCR_SS;
}
NOKPROBE_SYMBOL(kernel_active_single_step);

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	if (!test_and_set_ti_thread_flag(ti, TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_enable_single_step);

void user_disable_single_step(struct task_struct *task)
{
	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
}
NOKPROBE_SYMBOL(user_disable_single_step);
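
/*
 * Illustrative sketch (not part of this file): a debugger running in a
 * debug exception handler with interrupts disabled can step over a single
 * instruction with the kernel API above, roughly as follows. Both function
 * names are hypothetical.
 */
#if 0
/* Arm a single step before returning from the debug exception handler. */
static void example_arm_step(struct pt_regs *regs)
{
	kernel_enable_single_step(regs);	/* SPSR.SS, MDSCR.SS/KDE/MDE */
}

/* Kernel step_hook ->fn, called once the stepped instruction has retired. */
static int example_step_done(struct pt_regs *regs, unsigned long esr)
{
	if (!kernel_active_single_step())
		return DBG_HOOK_ERROR;		/* we weren't stepping */

	kernel_disable_single_step();
	return DBG_HOOK_HANDLED;
}
#endif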