cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

bpf_jit_core.c (4245B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Common functionality for RV32 and RV64 BPF JIT compilers
      4 *
      5 * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
      6 *
      7 */
      8
      9#include <linux/bpf.h>
     10#include <linux/filter.h>
     11#include "bpf_jit.h"
     12
     13/* Number of iterations to try until offsets converge. */
     14#define NR_JIT_ITERATIONS	32
     15
/*
 * Translate every BPF instruction of ctx->prog to native code by invoking
 * bpf_jit_emit_insn() on each one in order.
 *
 * @ctx:        JIT context holding the program and the emit state (ninsns).
 * @extra_pass: forwarded to bpf_jit_emit_insn(); set on the final pass once
 *              the image has been allocated.
 * @offset:     if non-NULL, records ctx->ninsns (the native instruction
 *              count emitted so far) per BPF instruction index, used to
 *              resolve jump targets.
 *
 * Returns 0 on success, or the negative error code from
 * bpf_jit_emit_insn() on failure.
 */
static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
		/* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
		if (ret > 0)
			i++;
		/*
		 * Note the ordering: for a two-slot ld_imm64 (ret > 0) the
		 * increment above happens first, so the offset is recorded
		 * against the second half-slot of the instruction.
		 */
		if (offset)
			offset[i] = ctx->ninsns;
		if (ret < 0)
			return ret;
	}
	return 0;
}
     36
/*
 * Tell the BPF core that this JIT wants the verifier to insert explicit
 * zero-extension instructions after 32-bit ALU operations, rather than
 * zero-extending in the emitted code itself.
 */
bool bpf_jit_needs_zext(void)
{
	return true;
}
     41
/*
 * JIT-compile @prog to native RISC-V code.
 *
 * The compiler iterates build_body() up to NR_JIT_ITERATIONS times until
 * the emitted instruction count stops changing (branch offsets can shrink
 * as estimates tighten, e.g. auipc/jalr -> jal), then allocates the binary
 * image and performs one final emission pass into it.
 *
 * Returns the JITed program on success, or the original @prog unchanged on
 * any failure (the caller then falls back to the interpreter).
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	unsigned int prog_size = 0, extable_size = 0;
	bool tmp_blinded = false, extra_pass = false;
	struct bpf_prog *tmp, *orig_prog = prog;
	int pass = 0, prev_ninsns = 0, i;
	struct rv_jit_data *jit_data;
	struct rv_jit_context *ctx;

	if (!prog->jit_requested)
		return orig_prog;

	/* Constant blinding may clone the program; remember if it did. */
	tmp = bpf_jit_blind_constants(prog);
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	/*
	 * jit_data persists across invocations for subprogs: on the second
	 * call (extra pass) the context from the first call is reused.
	 */
	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}

	ctx = &jit_data->ctx;

	/* A non-NULL offset array means a prior pass already converged. */
	if (ctx->offset) {
		extra_pass = true;
		prog_size = sizeof(*ctx->insns) * ctx->ninsns;
		goto skip_init_ctx;
	}

	ctx->prog = prog;
	ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (!ctx->offset) {
		prog = orig_prog;
		goto out_offset;
	}
	/*
	 * Seed the offsets with a deliberate overestimate (32 native insns
	 * per BPF insn) so first-pass branches are emitted in their long
	 * form; subsequent passes can only shrink.
	 */
	for (i = 0; i < prog->len; i++) {
		prev_ninsns += 32;
		ctx->offset[i] = prev_ninsns;
	}

	/* Iterate until the emitted size converges (fixed point). */
	for (i = 0; i < NR_JIT_ITERATIONS; i++) {
		pass++;
		ctx->ninsns = 0;
		if (build_body(ctx, extra_pass, ctx->offset)) {
			prog = orig_prog;
			goto out_offset;
		}
		bpf_jit_build_prologue(ctx);
		ctx->epilogue_offset = ctx->ninsns;
		bpf_jit_build_epilogue(ctx);

		if (ctx->ninsns == prev_ninsns) {
			if (jit_data->header)
				break;
			/* obtain the actual image size */
			extable_size = prog->aux->num_exentries *
				sizeof(struct exception_table_entry);
			prog_size = sizeof(*ctx->insns) * ctx->ninsns;

			jit_data->header =
				bpf_jit_binary_alloc(prog_size + extable_size,
						     &jit_data->image,
						     sizeof(u32),
						     bpf_fill_ill_insns);
			if (!jit_data->header) {
				prog = orig_prog;
				goto out_offset;
			}

			ctx->insns = (u16 *)jit_data->image;
			/*
			 * Now, when the image is allocated, the image can
			 * potentially shrink more (auipc/jalr -> jal).
			 */
		}
		prev_ninsns = ctx->ninsns;
	}

	/* Loop ran out without hitting the "break" above: no convergence. */
	if (i == NR_JIT_ITERATIONS) {
		pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
		if (jit_data->header)
			bpf_jit_binary_free(jit_data->header);
		prog = orig_prog;
		goto out_offset;
	}

	/* Exception table lives directly after the program image. */
	if (extable_size)
		prog->aux->extable = (void *)ctx->insns + prog_size;

skip_init_ctx:
	pass++;
	ctx->ninsns = 0;
	ctx->nexentries = 0;

	/* Final pass: emit into the allocated image for real. */
	bpf_jit_build_prologue(ctx);
	if (build_body(ctx, extra_pass, NULL)) {
		bpf_jit_binary_free(jit_data->header);
		prog = orig_prog;
		goto out_offset;
	}
	bpf_jit_build_epilogue(ctx);

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);

	prog->bpf_func = (void *)ctx->insns;
	prog->jited = 1;
	prog->jited_len = prog_size;

	bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);

	/*
	 * For multi-function programs the intermediate state is kept until
	 * the extra pass; only then is the image locked and state freed.
	 */
	if (!prog->is_func || extra_pass) {
		bpf_jit_binary_lock_ro(jit_data->header);
out_offset:
		kfree(ctx->offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:

	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}
    176
/* Upper bound on total executable JIT memory: the size of the BPF region. */
u64 bpf_jit_alloc_exec_limit(void)
{
	return BPF_JIT_REGION_SIZE;
}
    181
/*
 * Allocate @size bytes for a JIT image from the dedicated BPF region
 * (BPF_JIT_REGION_START..BPF_JIT_REGION_END), so generated code stays
 * within branch range of the kernel text.  Pages are mapped PAGE_KERNEL;
 * the caller flips permissions before execution.
 */
void *bpf_jit_alloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}
    189
    190void bpf_jit_free_exec(void *addr)
    191{
    192	return vfree(addr);
    193}