cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

unwind.h (2579B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UNWIND_H
#define __UNWIND_H

#include <linux/compiler.h>
#include <linux/types.h>
#include "util/map_symbol.h"

struct maps;
struct perf_sample;
struct thread;

struct unwind_entry {
	struct map_symbol ms;
	u64		  ip;
};

typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);

struct unwind_libunwind_ops {
	int (*prepare_access)(struct maps *maps);
	void (*flush_access)(struct maps *maps);
	void (*finish_access)(struct maps *maps);
	int (*get_entries)(unwind_entry_cb_t cb, void *arg,
			   struct thread *thread,
			   struct perf_sample *data, int max_stack, bool best_effort);
};

#ifdef HAVE_DWARF_UNWIND_SUPPORT
/*
 * When best_effort is set, don't report errors and fail silently. This could
 * be expanded in the future to be more permissive about things other than
 * error messages.
 */
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack,
			bool best_effort);
/* libunwind specific */
#ifdef HAVE_LIBUNWIND_SUPPORT
#ifndef LIBUNWIND__ARCH_REG_ID
#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arch_reg_id(regnum)
#endif

#ifndef LIBUNWIND__ARCH_REG_SP
#define LIBUNWIND__ARCH_REG_SP PERF_REG_SP
#endif

#ifndef LIBUNWIND__ARCH_REG_IP
#define LIBUNWIND__ARCH_REG_IP PERF_REG_IP
#endif

int LIBUNWIND__ARCH_REG_ID(int regnum);
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized);
void unwind__flush_access(struct maps *maps);
void unwind__finish_access(struct maps *maps);
#else /* !HAVE_LIBUNWIND_SUPPORT: no-op stubs */
static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
					 struct map *map __maybe_unused,
					 bool *initialized __maybe_unused)
{
	return 0;
}

static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
#endif /* HAVE_LIBUNWIND_SUPPORT */
#else /* !HAVE_DWARF_UNWIND_SUPPORT: no-op stubs */
static inline int
unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
		    void *arg __maybe_unused,
		    struct thread *thread __maybe_unused,
		    struct perf_sample *data __maybe_unused,
		    int max_stack __maybe_unused,
		    bool best_effort __maybe_unused)
{
	return 0;
}

static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
					 struct map *map __maybe_unused,
					 bool *initialized __maybe_unused)
{
	return 0;
}

static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
#endif /* __UNWIND_H */
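
For orientation, below is a minimal sketch of how a caller inside perf would drive this callback API. It is not part of the header: the names count_frame, frame_counter and count_callchain are hypothetical, the include path is assumed to be relative to tools/perf as in the header's own includes, and the thread/sample pointers are assumed to come from perf's normal event processing.

/*
 * Hypothetical usage sketch of unwind__get_entries(); not part of unwind.h.
 */
#include "util/unwind.h"	/* assumed include path, relative to tools/perf */

struct frame_counter {
	int nr;
};

/* Invoked once per unwound frame; a non-zero return stops the walk. */
static int count_frame(struct unwind_entry *entry, void *arg)
{
	struct frame_counter *fc = arg;

	/* entry->ip is the frame's instruction pointer, entry->ms the
	 * map/symbol it resolved to (the symbol may be NULL). */
	if (entry->ip)
		fc->nr++;

	return 0;
}

static int count_callchain(struct thread *thread, struct perf_sample *sample)
{
	struct frame_counter fc = { .nr = 0 };
	int err;

	/* Walk at most 127 frames; best_effort = false, so unwind errors
	 * are reported instead of being silently swallowed. */
	err = unwind__get_entries(count_frame, &fc, thread, sample,
				  127, false);

	return err ? err : fc.nr;
}

When perf is built without HAVE_DWARF_UNWIND_SUPPORT, the stub version of unwind__get_entries() above simply returns 0, so such a caller degrades to reporting an empty callchain rather than failing to compile.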