cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

skel_internal.h (8650B)


/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2021 Facebook */
#ifndef __SKEL_INTERNAL_H
#define __SKEL_INTERNAL_H

#ifdef __KERNEL__
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#else
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <stdlib.h>
#include "bpf.h"
#endif

#ifndef __NR_bpf
# if defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# endif
#endif

/* This file is a base header for auto-generated *.lskel.h files.
 * Its contents will change and may become part of auto-generation in the future.
 *
 * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent:
 * it will change from one version of libbpf to another depending on the
 * features requested during loader program generation.
 */
struct bpf_map_desc {
	/* output of the loader prog */
	int map_fd;
	/* input for the loader prog */
	__u32 max_entries;
	__aligned_u64 initial_value;
};
struct bpf_prog_desc {
	int prog_fd;
};

enum {
	BPF_SKEL_KERNEL = (1ULL << 0),
};

struct bpf_loader_ctx {
	__u32 sz;
	__u32 flags;
	__u32 log_level;
	__u32 log_size;
	__u64 log_buf;
};

struct bpf_load_and_run_opts {
	struct bpf_loader_ctx *ctx;
	const void *data;
	const void *insns;
	__u32 data_sz;
	__u32 insns_sz;
	const char *errstr;
};
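
/* Illustrative sketch (not part of the original header): a foo.lskel.h
 * generated by "bpftool gen skeleton -L" ties these pieces together in a
 * skeleton struct roughly like the following. The exact layout is emitted
 * by bpftool and varies between libbpf versions; all names below are
 * hypothetical:
 *
 *	struct foo {
 *		struct bpf_loader_ctx ctx;
 *		struct {
 *			struct bpf_map_desc rodata;
 *		} maps;
 *		struct {
 *			struct bpf_prog_desc entry;
 *		} progs;
 *		struct {
 *			int entry_fd;
 *		} links;
 *		struct foo__rodata {
 *			int my_config;
 *		} *rodata;
 *	};
 */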

long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);

static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			       unsigned int size)
{
#ifdef __KERNEL__
	return bpf_sys_bpf(cmd, attr, size);
#else
	return syscall(__NR_bpf, cmd, attr, size);
#endif
}

#ifdef __KERNEL__
static inline int close(int fd)
{
	return close_fd(fd);
}

static inline void *skel_alloc(size_t size)
{
	struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->flags |= BPF_SKEL_KERNEL;
	return ctx;
}

static inline void skel_free(const void *p)
{
	kfree(p);
}

/* skel->bss/rodata maps are populated the following way:
 *
 * For kernel use:
 * skel_prep_map_data() allocates kernel memory that a kernel module can directly access.
 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
 * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
 * skel_finalize_map_data() sets skel->rodata to point to the actual value in a bpf map and
 * sets maps.rodata.initial_value to ~0ULL to signal skel_free_map_data() that kvfree
 * is not necessary.
 *
 * For user space:
 * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
 * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
 * The loader program will perform copy_from_user() from maps.rodata.initial_value.
 * skel_finalize_map_data() remaps the bpf array map value from kernel memory into
 * the skel->rodata address.
 *
 * The "bpftool gen skeleton -L" command generates an lskel.h that is suitable for
 * both kernel and user space. The generated loader program does
 * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
 * depending on bpf_loader_ctx->flags. A sketch of the user-space lifecycle
 * follows the user-space variants below.
 */
static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	if (addr != ~0ULL)
		kvfree(p);
	/* When addr == ~0ULL the 'p' points to
	 * ((struct bpf_array *)map)->value. See skel_finalize_map_data.
	 */
}

static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	addr = kvmalloc(val_sz, GFP_KERNEL);
	if (!addr)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}

static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	struct bpf_map *map;
	void *addr = NULL;

	kvfree((void *) (long) *init_val);
	*init_val = ~0ULL;

	/* At this point bpf_load_and_run() finished without error and
	 * 'fd' is a valid bpf map FD. All sanity checks below should succeed.
	 */
	map = bpf_map_get(fd);
	if (IS_ERR(map))
		return NULL;
	if (map->map_type != BPF_MAP_TYPE_ARRAY)
		goto out;
	addr = ((struct bpf_array *)map)->value;
	/* the addr stays valid, since FD is not closed */
out:
	bpf_map_put(map);
	return addr;
}

#else

static inline void *skel_alloc(size_t size)
{
	return calloc(1, size);
}

static inline void skel_free(void *p)
{
	free(p);
}

static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
{
	munmap(p, sz);
}

static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
{
	void *addr;

	addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return NULL;
	memcpy(addr, val, val_sz);
	return addr;
}

static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
{
	void *addr;

	addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
	if (addr == MAP_FAILED)
		return NULL;
	return addr;
}
#endif
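
/* Illustrative sketch (not part of the original header): the user-space
 * lifecycle of a rodata map as described in the comment above the kernel
 * variants. The sizes and the map_fd below are hypothetical; map_fd would
 * come from the loader's bpf_map_desc after bpf_load_and_run():
 *
 *	const int init_val = 42;
 *	size_t mmap_sz = 4096;		// page-rounded map value size
 *	void *rodata = skel_prep_map_data(&init_val, mmap_sz, sizeof(init_val));
 *
 *	maps.rodata.initial_value = (__u64) (long) rodata;
 *	// ... bpf_load_and_run() creates the map and the loader prog
 *	// copies the bytes at initial_value into the map value ...
 *	rodata = skel_finalize_map_data(&maps.rodata.initial_value,
 *					mmap_sz, PROT_READ, map_fd);
 *	// on teardown:
 *	skel_free_map_data(rodata, maps.rodata.initial_value, mmap_sz);
 */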

static inline int skel_closenz(int fd)
{
	if (fd > 0)
		return close(fd);
	return -EINVAL;
}

#ifndef offsetofend
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
#endif
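
/* The syscall wrappers below size union bpf_attr via offsetofend() of the
 * last field they initialize and pass that prefix length to the kernel, so
 * only the prefix needs to be zeroed and fields added to the union by newer
 * UAPI headers are never handed to an older kernel.
 */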

static inline int skel_map_create(enum bpf_map_type map_type,
				  const char *map_name,
				  __u32 key_size,
				  __u32 value_size,
				  __u32 max_entries)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);

	attr.map_type = map_type;
	strncpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz);
}

static inline int skel_map_update_elem(int fd, const void *key,
				       const void *value, __u64 flags)
{
	const size_t attr_sz = offsetofend(union bpf_attr, flags);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.map_fd = fd;
	attr.key = (long) key;
	attr.value = (long) value;
	attr.flags = flags;

	return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
}

static inline int skel_raw_tracepoint_open(const char *name, int prog_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.raw_tracepoint.name = (long) name;
	attr.raw_tracepoint.prog_fd = prog_fd;

	return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
}

static inline int skel_link_create(int prog_fd, int target_fd,
				   enum bpf_attach_type attach_type)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len);
	union bpf_attr attr;

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;

	return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
}

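/* In user space the bpf() syscall returns -1 on error and leaves the real
 * error code in errno, so set_err must recover it. In the kernel,
 * bpf_sys_bpf() returns the negative error directly and 'err' already
 * holds it, so set_err expands to nothing.
 */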
#ifdef __KERNEL__
#define set_err
#else
#define set_err err = -errno
#endif

static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
	int map_fd = -1, prog_fd = -1, key = 0, err;
	union bpf_attr attr;

	/* Stash the loader prog's data blob in a single-entry array map. */
	err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
	if (map_fd < 0) {
		opts->errstr = "failed to create loader map";
		set_err;
		goto out;
	}

	err = skel_map_update_elem(map_fd, &key, opts->data, 0);
	if (err < 0) {
		opts->errstr = "failed to update loader map";
		set_err;
		goto out;
	}

	/* Load the loader program; fd_array lets its instructions reference
	 * the map by index.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SYSCALL;
	attr.insns = (long) opts->insns;
	attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
	attr.license = (long) "Dual BSD/GPL";
	memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
	attr.fd_array = (long) &map_fd;
	attr.log_level = opts->ctx->log_level;
	attr.log_size = opts->ctx->log_size;
	attr.log_buf = opts->ctx->log_buf;
	attr.prog_flags = BPF_F_SLEEPABLE;
	err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	if (prog_fd < 0) {
		opts->errstr = "failed to load loader prog";
		set_err;
		goto out;
	}

	/* Execute the loader program once via BPF_PROG_RUN, passing the
	 * skeleton's ctx as input.
	 */
	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.ctx_in = (long) opts->ctx;
	attr.test.ctx_size_in = opts->ctx->sz;
	err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr));
	if (err < 0 || (int)attr.test.retval < 0) {
		opts->errstr = "failed to execute loader prog";
		if (err < 0) {
			set_err;
		} else {
			err = (int)attr.test.retval;
#ifndef __KERNEL__
			errno = -err;
#endif
		}
		goto out;
	}
	err = 0;
out:
	if (map_fd >= 0)
		close(map_fd);
	if (prog_fd >= 0)
		close(prog_fd);
	return err;
}
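
/* Illustrative sketch (not part of the original header): how a generated
 * lskel's load function is expected to drive bpf_load_and_run(). opts_data[]
 * and opts_insn[] are hypothetical stand-ins for the byte blobs that bpftool
 * embeds in the generated file:
 *
 *	static inline int foo__load(struct foo *skel)
 *	{
 *		struct bpf_load_and_run_opts opts = {};
 *
 *		opts.ctx = (struct bpf_loader_ctx *)skel;
 *		opts.data = opts_data;
 *		opts.data_sz = sizeof(opts_data) - 1;
 *		opts.insns = opts_insn;
 *		opts.insns_sz = sizeof(opts_insn) - 1;
 *		return bpf_load_and_run(&opts);
 *	}
 */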

#endif