cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

unwind.c (12676B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>
#include <asm/switch_to.h>
#include <asm/sections.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) pr_debug(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static DEFINE_SPINLOCK(unwind_lock);
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __ro_after_init;
static LIST_HEAD(unwind_tables);

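/*
 * Binary search @table for the entry whose [region_start, region_end]
 * range contains @addr.
 */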
static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

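/*
 * Look up the unwind entry covering @addr: try the statically
 * initialised kernel table first, then any tables registered at
 * runtime (e.g. for modules), moving a matching table to the front
 * of the list.
 */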
static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}

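/*
 * Initialise @table from the raw entries in [table_start, table_end):
 * the region addresses are stored relative to @base_addr and are
 * rebased to absolute addresses here.
 */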
static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			pr_warn("Out of order unwind entry! %px and %px\n",
				start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

static int cmp_unwind_table_entry(const void *a, const void *b)
{
	return ((const struct unwind_table_entry *)a)->region_start
	     - ((const struct unwind_table_entry *)b)->region_start;
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	sort(start, finish - start, sizeof(struct unwind_table_entry),
	     cmp_unwind_table_entry, NULL);
}

struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}

void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

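/*
 * Check whether @pc is the entry point of the kernel symbol @fn,
 * resolving the function descriptor where the ABI uses one.
 */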
static bool pc_is_kernel_fn(unsigned long pc, void *fn)
{
	return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
}

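/*
 * Frames created by assembly entry code (interruption handling,
 * syscall and interrupt return, kernel threads, context switch and
 * IRQ stack calls) do not follow the normal prologue conventions,
 * so recover prev_sp/prev_ip for them explicitly.  Returns 1 if the
 * frame was handled here.
 */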
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	/*
	 * We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text
	 * Note: We could use dereference_kernel_function_descriptor()
	 * instead but we want to keep it simple here.
	 */
	extern void * const handle_interruption;
	extern void * const ret_from_kernel_thread;
	extern void * const syscall_exit;
	extern void * const intr_return;
	extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
	extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */

	if (pc_is_kernel_fn(pc, handle_interruption)) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		return 1;
	}

	if (pc_is_kernel_fn(pc, ret_from_kernel_thread) ||
	    pc_is_kernel_fn(pc, syscall_exit)) {
		info->prev_sp = info->prev_ip = 0;
		return 1;
	}

	if (pc_is_kernel_fn(pc, intr_return)) {
		struct pt_regs *regs;

		dbg("Found intr_return()\n");
		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		info->rp = regs->gr[2];
		return 1;
	}

	if (pc_is_kernel_fn(pc, _switch_to) ||
	    pc_is_kernel_fn(pc, _switch_to_ret)) {
		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
		return 1;
	}

#ifdef CONFIG_IRQSTACKS
	if (pc_is_kernel_fn(pc, _call_on_stack)) {
		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
		return 1;
	}
#endif
	return 0;
}

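/*
 * Work out prev_sp and prev_ip for the frame described by @info: use
 * the unwind table entry for info->ip when one exists, otherwise fall
 * back to blindly scanning the stack for a plausible return pointer.
 */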
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
			(void *) info->ip);

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section; if not, we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* Check if stack is inside kernel stack area */
			if ((info->prev_sp - (unsigned long) task_stack_page(info->t))
					>= THREAD_SIZE) {
				info->prev_sp = 0;
				break;
			}

			if (copy_from_kernel_nofault(&tmp,
			    (void *)info->prev_sp - RP_OFFSET, sizeof(tmp)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

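		/*
		 * Scan the function prologue from region_start towards the
		 * current ip, accumulating stack-pointer adjustments into
		 * frame_size and noting the offset at which the return
		 * pointer was saved.
		 */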
		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

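/*
 * Start an unwind of a sleeping task from its saved kernel stack
 * pointer and program counter, using a temporary copy of its
 * register state.
 */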
void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

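/* Read the current stack pointer (general register %r30 on PA-RISC). */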
#define get_parisc_stackpointer() ({ \
	unsigned long sp; \
	__asm__("copy %%r30, %0" : "=r"(sp)); \
	(sp); \
})

void unwind_frame_init_task(struct unwind_frame_info *info,
	struct task_struct *task, struct pt_regs *regs)
{
	task = task ? task : current;

	if (task == current) {
		struct pt_regs r;

		if (!regs) {
			memset(&r, 0, sizeof(r));
			r.iaoq[0] = _THIS_IP_;
			r.gr[2] = _RET_IP_;
			r.gr[30] = get_parisc_stackpointer();
			regs = &r;
		}
		unwind_frame_init(info, task, regs);
	} else {
		unwind_frame_init_from_blocked_task(info, task);
	}
}

int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

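/*
 * Keep unwinding until a step fails or the instruction address leaves
 * kernel space.  On PA-RISC the low two bits of an instruction address
 * encode the privilege level, so a non-zero (ip & 3) indicates a
 * user-space address.
 */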
int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

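/*
 * Return the kernel text address @level frames up from the caller of
 * this function, or 0 if the unwind fails.  The unwind starts inside
 * return_address() itself, which the adjustment of @level below
 * accounts for.
 */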
unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;

	/* initialize unwind info */
	unwind_frame_init_task(&info, current, NULL);

	/* unwind stack */
	level += 2;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}