cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

text-patching.h (5065B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TEXT_PATCHING_H
#define _ASM_X86_TEXT_PATCHING_H

#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>

struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif

/*
 * Currently, the max observed size in the kernel code is
 * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
 * Raise it if needed.
 */
#define POKE_MAX_OPCODE_SIZE	5

extern void text_poke_early(void *addr, const void *opcode, size_t len);

/*
 * Clear and restore the kernel write-protection flag on the local CPU.
 * Allows the kernel to edit read-only pages.
 * Side-effect: any interrupt handler running between save and restore will have
 * the ability to write to read-only pages.
 *
 * Warning:
 * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
 * no thread can be preempted in the instructions being modified (no iret to an
 * invalid instruction possible) or if the instructions are changed from a
 * consistent state to another consistent state atomically.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing
 * an inconsistent instruction while you patch.
 */
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void text_poke_sync(void);
extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
extern void *text_poke_copy(void *addr, const void *opcode, size_t len);
extern void *text_poke_set(void *addr, int c, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);

extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
extern void text_poke_finish(void);
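
/*
 * Illustrative sketch, not part of this header: plant a single breakpoint
 * byte at a hypothetical patch site. text_poke() must run under text_mutex
 * (declared in <linux/memory.h>), and text_poke_sync() then serializes the
 * other CPUs so they observe the new byte.
 */
static inline void example_plant_int3(void *addr)
{
	u8 int3 = INT3_INSN_OPCODE;

	mutex_lock(&text_mutex);
	text_poke(addr, &int3, INT3_INSN_SIZE);
	text_poke_sync();
	mutex_unlock(&text_mutex);
}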

#define INT3_INSN_SIZE		1
#define INT3_INSN_OPCODE	0xCC

#define RET_INSN_SIZE		1
#define RET_INSN_OPCODE		0xC3

#define CALL_INSN_SIZE		5
#define CALL_INSN_OPCODE	0xE8

#define JMP32_INSN_SIZE		5
#define JMP32_INSN_OPCODE	0xE9

#define JMP8_INSN_SIZE		2
#define JMP8_INSN_OPCODE	0xEB

#define DISP32_SIZE		4

static __always_inline int text_opcode_size(u8 opcode)
{
	int size = 0;

#define __CASE(insn)	\
	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break

	switch(opcode) {
	__CASE(INT3);
	__CASE(RET);
	__CASE(CALL);
	__CASE(JMP32);
	__CASE(JMP8);
	}

#undef __CASE

	return size;
}
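
/*
 * E.g. text_opcode_size(CALL_INSN_OPCODE) returns CALL_INSN_SIZE (5); an
 * opcode missing from the table above falls through and yields 0.
 */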

union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;
	} __attribute__((packed));
};
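
/*
 * Layout sketch: a JMP32 from addr to dest = addr + 0x100 fills the union
 * with opcode 0xE9 followed by a little-endian s32 displacement measured
 * from the end of the 5-byte instruction:
 *
 *	disp   = dest - (addr + 5) = 0xFB
 *	text[] = { 0xE9, 0xFB, 0x00, 0x00, 0x00 }
 */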

static __always_inline
void __text_gen_insn(void *buf, u8 opcode, const void *addr, const void *dest, int size)
{
	union text_poke_insn *insn = buf;

	BUG_ON(size < text_opcode_size(opcode));

	/*
	 * Hide the addresses to avoid the compiler folding in constants when
	 * referencing code, these can mess up annotations like
	 * ANNOTATE_NOENDBR.
	 */
	OPTIMIZER_HIDE_VAR(insn);
	OPTIMIZER_HIDE_VAR(addr);
	OPTIMIZER_HIDE_VAR(dest);

	insn->opcode = opcode;

	if (size > 1) {
		insn->disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
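			 * (For an in-range s8, bits 7..31 all equal the
			 * sign bit, so arithmetic shifts by 7 and by 31
			 * produce the same value.)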
			 */
			BUG_ON((insn->disp >> 31) != (insn->disp >> 7));
		}
	}
}

static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	__text_gen_insn(&insn, opcode, addr, dest, text_opcode_size(opcode));
	return &insn.text;
}
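
/*
 * Usage sketch with a hypothetical call site: generate a fresh 5-byte CALL
 * and install it through the INT3 breakpoint machinery. A NULL @emulate
 * makes poke_int3_handler() emulate the new instruction for any CPU that
 * hits the transient breakpoint mid-update.
 */
static inline void example_retarget_call(void *site, void *new_func)
{
	text_poke_bp(site, text_gen_insn(CALL_INSN_OPCODE, site, new_func),
		     CALL_INSN_SIZE, NULL);
}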

extern int after_bootmem;
extern __ro_after_init struct mm_struct *poking_mm;
extern __ro_after_init unsigned long poking_addr;

#ifndef CONFIG_UML_X86
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}

static __always_inline
void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The int3 handler in entry_64.S adds a gap between the
	 * stack where the break point happened, and the saving of
	 * pt_regs. We can extend the original stack because of
	 * this gap. See the idtentry macro's create_gap option.
	 *
	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
	 * exception and pt_regs; see FIXUP_FRAME.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static __always_inline
unsigned long int3_emulate_pop(struct pt_regs *regs)
{
	unsigned long val = *(unsigned long *)regs->sp;
	regs->sp += sizeof(unsigned long);
	return val;
}

static __always_inline
void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}

static __always_inline
void int3_emulate_ret(struct pt_regs *regs)
{
	unsigned long ip = int3_emulate_pop(regs);
	int3_emulate_jmp(regs, ip);
}
#endif /* !CONFIG_UML_X86 */
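
/*
 * Emulation walk-through (sketch): while text_poke_bp() rewrites a CALL
 * site, a CPU executing the transient INT3 enters poke_int3_handler()
 * with regs->ip one byte past the breakpoint. The handler emulates the
 * new instruction, e.g.:
 *
 *	int3_emulate_call(regs, func);
 *	  -> pushes regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE, the return
 *	     address just past the full 5-byte CALL
 *	  -> resumes at func by setting regs->ip
 */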

#endif /* _ASM_X86_TEXT_PATCHING_H */