cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uprobes.c (3788B)
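The file below is the RISC-V architecture backend for user-space probes (uprobes), presumably arch/riscv/kernel/probes/uprobes.c: breakpoint detection, analysis of the probed instruction, single-step out of line (XOL) setup and teardown, and return-probe (uretprobe) address hijacking.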


// SPDX-License-Identifier: GPL-2.0-only

#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>

#include "decode-insn.h"

#define UPROBE_TRAP_NR	UINT_MAX

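/*
 * Report whether @insn is the uprobes breakpoint instruction. With
 * CONFIG_RISCV_ISA_C the breakpoint is a compressed (16-bit) encoding,
 * so only the low halfword is compared.
 */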
bool is_swbp_insn(uprobe_opcode_t *insn)
{
#ifdef CONFIG_RISCV_ISA_C
	return (*insn & 0xffff) == UPROBE_SWBP_INSN;
#else
	return *insn == UPROBE_SWBP_INSN;
#endif
}

unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

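/*
 * Decode the instruction found at the probe point, record its length and
 * decide how it will be handled: rejected, simulated in the kernel
 * (INSN_GOOD_NO_SLOT), or single-stepped out of line (INSN_GOOD).
 */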
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t opcode;

	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);

	auprobe->insn_size = GET_INSN_LENGTH(opcode);

	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
		auprobe->simulate = false;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

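/*
 * Prepare a single step out of line: remember the task's bad_cause and mark
 * it with UPROBE_TRAP_NR so a fault during the step can be detected, then
 * point the pc at the XOL slot.
 */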
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	instruction_pointer_set(regs, utask->xol_vaddr);

	regs->status &= ~SR_SPIE;

	return 0;
}

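/*
 * The instruction in the XOL slot has been executed; resume the task at the
 * instruction following the original probe address.
 */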
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);

	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	regs->status |= SR_SPIE;

	return 0;
}

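/*
 * If bad_cause no longer holds UPROBE_TRAP_NR, the single-stepped
 * instruction itself trapped.
 */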
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
	if (t->thread.bad_cause != UPROBE_TRAP_NR)
		return true;

	return false;
}

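/*
 * For instructions that cannot be stepped out of line, emulate them via the
 * handler chosen by the decoder instead of single stepping.
 */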
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}

void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * Task has received a fatal signal, so reset back to the probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);

	regs->status &= ~SR_SPIE;
}

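/*
 * Decide whether a pending return-probe instance is still valid by comparing
 * the current stack pointer with the one recorded when the return address
 * was hijacked.
 */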
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
		struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return regs->sp <= ret->stack;
	else
		return regs->sp < ret->stack;
}

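/*
 * Replace the return address in ra with the uretprobe trampoline and hand
 * the original value back to the generic uprobes code.
 */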
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
				  struct pt_regs *regs)
{
	unsigned long ra;

	ra = regs->ra;

	regs->ra = trampoline_vaddr;

	return ra;
}

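/* Nothing to do via the exception notifier chain on RISC-V. */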
int arch_uprobe_exception_notify(struct notifier_block *self,
				 unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

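/*
 * Hooks invoked from the trap code. The breakpoint handler runs when a probe
 * ebreak is hit; the single-step handler runs when the ebreak appended after
 * the XOL slot (see arch_uprobe_copy_ixol() below) is hit.
 */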
bool uprobe_breakpoint_handler(struct pt_regs *regs)
{
	if (uprobe_pre_sstep_notifier(regs))
		return true;

	return false;
}

bool uprobe_single_step_handler(struct pt_regs *regs)
{
	if (uprobe_post_sstep_notifier(regs))
		return true;

	return false;
}

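/*
 * Copy the original instruction into the XOL slot and append an ebreak so
 * that execution traps back into the kernel after the single stepped
 * instruction.
 */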
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add an ebreak after the copied instruction to simulate a single step */
	if (vaddr) {
		dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
		*(uprobe_opcode_t *)dst = __BUG_INSN_32;
	}

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most architectures by default. If an
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}