cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bpf_jit.h (6107B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * bpf_jit.h: BPF JIT compiler for PPC
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 * 	     2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#ifndef __ASSEMBLY__

#include <asm/types.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64_ELF_ABI_V1
#define FUNCTION_DESCR_SIZE	24
#else
#define FUNCTION_DESCR_SIZE	0
#endif

#define PLANT_INSTR(d, idx, instr)					      \
	do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
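
/*
 * Illustration (not from this file, inferred from PLANT_INSTR above): the
 * JIT runs its generators twice over the same program.  On the sizing
 * pass 'image' is NULL, so PLANT_INSTR() only advances ctx->idx; on the
 * emission pass 'image' points at the allocated buffer and the word is
 * actually stored.  E.g. EMIT(PPC_RAW_MR(dst, src)) either counts one
 * instruction or writes 'mr dst, src' at image[ctx->idx].
 */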

/* Long jump: unconditional 'branch' */
#define PPC_JMP(dest)							      \
	do {								      \
		long offset = (long)(dest) - (ctx->idx * 4);		      \
		if ((dest) != 0 && !is_offset_in_branch_range(offset)) {      \
			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);			\
			return -ERANGE;					      \
		}							      \
		EMIT(PPC_RAW_BRANCH(offset));				      \
	} while (0)

/* bl (unconditional 'branch' with link) */
#define PPC_BL(dest)	EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))

/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest)					      \
	do {								      \
		long offset = (long)(dest) - (ctx->idx * 4);		      \
		if ((dest) != 0 && !is_offset_in_cond_branch_range(offset)) { \
			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);		\
			return -ERANGE;					      \
		}							      \
		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc));					\
	} while (0)
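
/*
 * Illustration (inferred from the encoding above): the BO:BI fields from
 * 'cond' land in instruction bits 25-16 via the << 16, and the branch
 * displacement is 'offset & 0xfffc', a word-aligned 16-bit field, giving
 * conditional branches a reach of roughly +/-32KB.  For example,
 * PPC_BCC_SHORT(COND_EQ, dest) with offset 8 assembles to 'beq .+8'.
 */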

/* Sign-extended 32-bit immediate load */
#define PPC_LI32(d, i)		do {					      \
		if ((int)(uintptr_t)(i) >= -32768 &&			      \
				(int)(uintptr_t)(i) < 32768)		      \
			EMIT(PPC_RAW_LI(d, i));				      \
		else {							      \
			EMIT(PPC_RAW_LIS(d, IMM_H(i)));			      \
			if (IMM_L(i))					      \
				EMIT(PPC_RAW_ORI(d, d, IMM_L(i)));	      \
		} } while (0)
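
/*
 * Worked example (illustration only): PPC_LI32(d, 0x12345678) is outside
 * the 16-bit signed range, so it expands to two instructions:
 *	lis	d, 0x1234	; d = 0x12340000
 *	ori	d, d, 0x5678	; d |= 0x5678
 * A small value such as PPC_LI32(d, -5) emits a single 'li d, -5'.
 */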

#ifdef CONFIG_PPC64
#define PPC_LI64(d, i)		do {					      \
		if ((long)(i) >= -2147483648 &&				      \
				(long)(i) < 2147483648)			      \
			PPC_LI32(d, i);					      \
		else {							      \
			if (!((uintptr_t)(i) & 0xffff800000000000ULL))	      \
				EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) &   \
						0xffff));		      \
			else {						      \
				EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \
				if ((uintptr_t)(i) & 0x0000ffff00000000ULL)   \
					EMIT(PPC_RAW_ORI(d, d,		      \
					  ((uintptr_t)(i) >> 32) & 0xffff));  \
			}						      \
			EMIT(PPC_RAW_SLDI(d, d, 32));			      \
			if ((uintptr_t)(i) & 0x00000000ffff0000ULL)	      \
				EMIT(PPC_RAW_ORIS(d, d,			      \
					 ((uintptr_t)(i) >> 16) & 0xffff));   \
			if ((uintptr_t)(i) & 0x000000000000ffffULL)	      \
				EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) &       \
							0xffff));	      \
		} } while (0)
#endif
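
/*
 * Worked example (illustration only):
 * PPC_LI64(d, 0x123456789abcdef0ULL) takes the long path above:
 *	lis	d, 0x1234	; d = 0x12340000
 *	ori	d, d, 0x5678	; d = 0x12345678
 *	sldi	d, d, 32	; d = 0x1234567800000000
 *	oris	d, d, 0x9abc	; d = 0x123456789abc0000
 *	ori	d, d, 0xdef0	; d = 0x123456789abcdef0
 * When bits 47-63 are all zero, the high half fits a single 'li', and
 * any all-zero 16-bit chunk simply drops its or-instruction.
 */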

/*
 * If code size changed from pass to pass (because a branch flipped
 * between its short and long form), instructions would keep moving from
 * one pass to the next and a few extra passes would be needed to
 * converge on a stable state.  We avoid that by padding the short
 * branch case with a NOP, so both cases emit exactly two instructions.
 */
#define PPC_BCC(cond, dest)	do {					      \
		if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {	\
			PPC_BCC_SHORT(cond, dest);			      \
			EMIT(PPC_RAW_NOP());				      \
		} else {						      \
			/* Flip the 'T or F' bit to invert the comparison */  \
			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4);  \
			PPC_JMP(dest);					      \
		} } while (0)
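
/*
 * Illustration (inferred from the macro above): with a near target,
 * PPC_BCC(COND_GT, dest) emits
 *	bgt	dest
 *	nop
 * and with a far one it inverts the test and hops over a long branch:
 *	ble	.+8		; BO[3] flipped via COND_CMP_TRUE
 *	b	dest
 * Both shapes are exactly two instructions, which is what keeps the
 * code size stable across passes.
 */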

/* To create a branch condition, select a bit of cr0... */
#define CR0_LT		0
#define CR0_GT		1
#define CR0_EQ		2
/* ...and modify BO[3] */
#define COND_CMP_TRUE	0x100
#define COND_CMP_FALSE	0x000
/* Together, they make all required comparisons: */
#define COND_GT		(CR0_GT | COND_CMP_TRUE)
#define COND_GE		(CR0_LT | COND_CMP_FALSE)
#define COND_EQ		(CR0_EQ | COND_CMP_TRUE)
#define COND_NE		(CR0_EQ | COND_CMP_FALSE)
#define COND_LT		(CR0_LT | COND_CMP_TRUE)
#define COND_LE		(CR0_GT | COND_CMP_FALSE)
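
/*
 * Illustrative usage (sketch, not from this file): after a signed compare
 * such as EMIT(PPC_RAW_CMPW(dst_reg, src_reg)), PPC_BCC(COND_GE, dest)
 * tests the CR0 'LT' bit with BO[3] = 0 and therefore branches when
 * dst_reg >= src_reg.
 */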

#define SEEN_FUNC	0x20000000 /* might call external helpers */
#define SEEN_TAILCALL	0x40000000 /* uses tail calls */

struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use the top bits 0 to 2
	 *   (IBM MSB-first numbering) encoded in the
	 *   SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;
	unsigned int stack_size;
	int b2p[MAX_BPF_JIT_REG + 2];
	unsigned int exentry_idx;
	unsigned int alt_exit_addr;
};

#define bpf_to_ppc(r)	(ctx->b2p[r])
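
/*
 * Illustrative usage (sketch, not from this file): the body generator
 * resolves every BPF register through this map before emitting code, e.g.
 *
 *	u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
 *	EMIT(PPC_RAW_ADD(dst_reg, dst_reg, bpf_to_ppc(insn[i].src_reg)));
 *
 * bpf_jit_init_reg_mapping() below fills in ctx->b2p first.
 */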

#ifdef CONFIG_PPC32
#define BPF_FIXUP_LEN	3 /* Three instructions => 12 bytes */
#else
#define BPF_FIXUP_LEN	2 /* Two instructions => 8 bytes */
#endif
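
/*
 * Sketch of why the lengths differ (an assumption based on the generic
 * extable pattern; see bpf_add_extable_entry() below): the fixup stub for
 * a faulting load writes 0 to the destination register and branches back
 * past the load.  On PPC32 a 64-bit BPF register occupies a pair of
 * 32-bit GPRs, so one extra 'li' clears the high word -- hence three
 * instructions instead of two.
 */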

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();	/* order the stores to the image before the flush */
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
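
/*
 * Illustrative call (the exact call site may differ): once the final pass
 * has written every instruction, the JIT flushes the whole range, e.g.
 *
 *	bpf_flush_icache(image, image + ctx->idx);
 *
 * so the CPU cannot fetch stale instruction-cache contents.
 */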

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return ctx->seen & (1 << (31 - i));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= 1 << (31 - i);
}

static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen &= ~(1 << (31 - i));
}
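
/*
 * Hypothetical helper (illustrative sketch, not in the original file):
 * the MSB-first numbering means r3 maps to bit 28 (1 << (31 - 3)) and
 * r31 to bit 0.  A prologue could use the helpers above to skip saving
 * non-volatile GPRs the program never touched:
 */
static inline bool bpf_any_nvr_seen_sketch(struct codegen_context *ctx)
{
	int i;

	/* r14-r31 are the non-volatile GPRs in the PPC ELF ABIs. */
	for (i = 14; i < 32; i++)
		if (bpf_is_seen_register(ctx, i))
			return true;
	return false;
}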

void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);

int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
			  int insn_idx, int jmp_off, int dst_reg);

#endif /* __ASSEMBLY__ */

#endif /* _BPF_JIT_H */