cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dwarf-unwind.c (6205B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <inttypes.h>
#include <limits.h>
#include <unistd.h>
#include "tests.h"
#include "debug.h"
#include "machine.h"
#include "event.h"
#include "../util/unwind.h"
#include "perf_regs.h"
#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "callchain.h"
#include "util/synthetic-events.h"

/* For bsearch(): the test unwinds through a function in a shared object (libc). */
#include <stdlib.h>

/*
 * The test will assert frames are on the stack but tail call optimizations lose
 * the frame of the caller. Clang can disable this optimization on a called
 * function but GCC currently (11/2020) lacks this attribute. The barrier is
 * used to inhibit tail calls in these cases.
 */
#ifdef __has_attribute
#if __has_attribute(disable_tail_calls)
#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
#define NO_TAIL_CALL_BARRIER
#endif
#endif
#ifndef NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
#endif

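/*
 * Sketch of the problem these macros work around (placeholder names
 * outer()/inner(), not part of this test): the compiler may turn
 *
 *	int outer(int x) { return inner(x); }
 *
 * into a bare jump to inner(), so outer() never gets its own frame and
 * drops out of the unwound call chain.  The helpers below therefore use
 * NO_TAIL_CALL_ATTRIBUTE and, where the result of the last call is
 * returned, place NO_TAIL_CALL_BARRIER before the return.
 */

/*
 * Callback for perf_event__synthesize_mmap_events(): apply each
 * synthesized MMAP2 record to the machine so the current process'
 * mappings end up in its map tree.
 */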
static int mmap_handler(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

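/*
 * Populate 'machine' with the memory mappings of the current process:
 * they are read from procfs and delivered as synthesized MMAP2 events
 * through mmap_handler(), so the unwinder can later resolve addresses
 * in this test binary and in libc.
 */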
static int init_live_machine(struct machine *machine)
{
	union perf_event event;
	pid_t pid = getpid();

	memset(&event, 0, sizeof(event));
	return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
						  mmap_handler, machine, true);
}

/*
 * We need to keep these functions global, despite the
 * fact that they are used only locally in this object,
 * in order to keep them around even if the binary is
 * stripped. If they are gone, the unwind check for
 * symbol fails.
 */
int test_dwarf_unwind__thread(struct thread *thread);
int test_dwarf_unwind__compare(void *p1, void *p2);
int test_dwarf_unwind__krava_3(struct thread *thread);
int test_dwarf_unwind__krava_2(struct thread *thread);
int test_dwarf_unwind__krava_1(struct thread *thread);

#define MAX_STACK 8

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	unsigned long *cnt = (unsigned long *) arg;
	char *symbol = entry->ms.sym ? entry->ms.sym->name : NULL;
	static const char *funcs[MAX_STACK] = {
		"test__arch_unwind_sample",
		"test_dwarf_unwind__thread",
		"test_dwarf_unwind__compare",
		"bsearch",
		"test_dwarf_unwind__krava_3",
		"test_dwarf_unwind__krava_2",
		"test_dwarf_unwind__krava_1",
		"test__dwarf_unwind"
	};
	/*
	 * The funcs[MAX_STACK] array index, based on the
	 * callchain order setup.
	 */
	int idx = callchain_param.order == ORDER_CALLER ?
		  MAX_STACK - *cnt - 1 : *cnt;
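	/*
	 * E.g. with MAX_STACK == 8: under ORDER_CALLER the first entry
	 * (*cnt == 0) maps to idx 7, "test__dwarf_unwind" (outermost
	 * caller first); under ORDER_CALLEE it maps to idx 0,
	 * "test__arch_unwind_sample" (innermost callee first).
	 */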

	if (*cnt >= MAX_STACK) {
		pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
		return -1;
	}

	if (!symbol) {
		pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
			 entry->ip);
		return -1;
	}

	(*cnt)++;
	pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
		 symbol, entry->ip, funcs[idx]);
	return strcmp((const char *) symbol, funcs[idx]);
}

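/*
 * Fill a perf_sample with the live registers and user stack of this
 * thread (arch-specific test__arch_unwind_sample()), run the DWARF
 * unwinder over it and require exactly MAX_STACK entries, each checked
 * against funcs[] by unwind_entry().
 */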
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
{
	struct perf_sample sample;
	unsigned long cnt = 0;
	int err = -1;

	memset(&sample, 0, sizeof(sample));

	if (test__arch_unwind_sample(&sample, thread)) {
		pr_debug("failed to get unwind sample\n");
		goto out;
	}

	err = unwind__get_entries(unwind_entry, &cnt, thread,
				  &sample, MAX_STACK, false);
	if (err)
		pr_debug("unwind failed\n");
	else if (cnt != MAX_STACK) {
		pr_debug("got wrong number of stack entries %lu != %d\n",
			 cnt, MAX_STACK);
		err = -1;
	}

 out:
	zfree(&sample.user_stack.data);
	zfree(&sample.user_regs.regs);
	return err;
}

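/*
 * "Not run yet" sentinel: bsearch() may invoke the compare callback
 * several times, but the unwinder should only be exercised on the
 * first call (once per callchain order).
 */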
static int global_unwind_retval = -INT_MAX;

NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
{
	/* The key and both array elements hold 'thread', so p1 dereferences to it. */
	struct thread *thread = *(struct thread **)p1;

	if (global_unwind_retval == -INT_MAX) {
		/* Call unwinder twice for both callchain orders. */
		callchain_param.order = ORDER_CALLER;

		global_unwind_retval = test_dwarf_unwind__thread(thread);
		if (!global_unwind_retval) {
			callchain_param.order = ORDER_CALLEE;
			global_unwind_retval = test_dwarf_unwind__thread(thread);
		}
	}

	return p1 - p2;
}

NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
{
	struct thread *array[2] = {thread, thread};
	void *fp = &bsearch;
	/*
	 * Make _bsearch a volatile function pointer so the compiler cannot
	 * expand bsearch() here and call the compare function directly from
	 * this function instead of from the libc shared object.
	 */
	void *(*volatile _bsearch)(void *, void *, size_t,
			size_t, int (*)(void *, void *));

	_bsearch = fp;
	_bsearch(array, &thread, 2, sizeof(struct thread **),
		 test_dwarf_unwind__compare);
	return global_unwind_retval;
}

NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
{
	int ret;

	ret = test_dwarf_unwind__krava_3(thread);
	NO_TAIL_CALL_BARRIER;
	return ret;
}

NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
{
	int ret;

	ret = test_dwarf_unwind__krava_2(thread);
	NO_TAIL_CALL_BARRIER;
	return ret;
}

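/*
 * Entry point of the suite: build a live machine for the current
 * process, then trigger an unwind from the deepest point of this call
 * chain (innermost frame first):
 *
 *	test__arch_unwind_sample
 *	test_dwarf_unwind__thread
 *	test_dwarf_unwind__compare
 *	bsearch			(frame inside the libc shared object)
 *	test_dwarf_unwind__krava_3
 *	test_dwarf_unwind__krava_2
 *	test_dwarf_unwind__krava_1
 *	test__dwarf_unwind
 *
 * which is exactly the funcs[] table checked in unwind_entry().
 */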
static int test__dwarf_unwind(struct test_suite *test __maybe_unused,
			      int subtest __maybe_unused)
{
	struct machine *machine;
	struct thread *thread;
	int err = -1;

	machine = machine__new_host();
	if (!machine) {
		pr_err("Could not get machine\n");
		return -1;
	}

	if (machine__create_kernel_maps(machine)) {
		pr_err("Failed to create kernel maps\n");
		goto out;	/* don't leak 'machine' on this error path */
	}

	callchain_param.record_mode = CALLCHAIN_DWARF;
	dwarf_callchain_users = true;

	if (init_live_machine(machine)) {
		pr_err("Could not init machine\n");
		goto out;
	}

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	thread = machine__find_thread(machine, getpid(), getpid());
	if (!thread) {
		pr_err("Could not get thread\n");
		goto out;
	}

	err = test_dwarf_unwind__krava_1(thread);
	thread__put(thread);

 out:
	machine__delete_threads(machine);
	machine__delete(machine);
	return err;
}

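/*
 * Register the suite with perf's built-in test framework; it can then
 * be run on its own with e.g. "perf test -v dwarf", where -v also
 * shows the pr_debug() output from unwind_entry().
 */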
DEFINE_SUITE("Test dwarf unwind", dwarf_unwind);