cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hw_breakpoint.c (8426B)


// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/sched/signal.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/traps.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search for the debug address register it uses, then we
 * disable it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}

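/*
 * Translate the arch-specific breakpoint length encoding into a size in
 * bytes. Returns 0 for an unrecognized encoding.
 */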
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}
	return len_in_bytes;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = get_hbp_len(hw->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

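/*
 * Translate the SH-specific length and type encodings into their generic
 * perf hw_breakpoint counterparts.
 */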
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

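/*
 * Construct the arch-specific breakpoint info from the generic perf
 * event attributes (the inverse of arch_bp_generic_fields() above).
 */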
static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	hw->address = attr->bp_addr;

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		hw->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_R:
		hw->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		hw->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		hw->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (hw->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (hw->address & align)
		return -EINVAL;

	return 0;
}

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can therefore safely fetch
		 * the breakpoint, use its callback, and touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removal.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(&bp->hw.info)) {
			force_sig_fault(SIGTRAP, TRAP_HWBKPT,
					(void __user *)NULL);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}

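/*
 * Trap entry for breakpoint exceptions: hand the event off to the die
 * notifier chain, where hw_breakpoint_exceptions_notify() below picks
 * it up.
 */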
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}

/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

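/*
 * Register the CPU-specific UBC implementation, replacing the boot-time
 * dummy. Only one UBC may be registered.
 */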
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}