cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bpf_jit.h (11898B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <asm/insn.h>

/* 5-bit Register Operand */
#define A64_R(x)	AARCH64_INSN_REG_##x
#define A64_FP		AARCH64_INSN_REG_FP
#define A64_LR		AARCH64_INSN_REG_LR
#define A64_ZR		AARCH64_INSN_REG_ZR
#define A64_SP		AARCH64_INSN_REG_SP

#define A64_VARIANT(sf) \
	((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)

/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
		AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)

/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
	aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ	AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE	AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS	AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI	AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_LS	AARCH64_INSN_COND_LS /* unsigned <= */
#define A64_COND_CC	AARCH64_INSN_COND_CC /* unsigned < */
#define A64_COND_GE	AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT	AARCH64_INSN_COND_GT /* signed > */
#define A64_COND_LE	AARCH64_INSN_COND_LE /* signed <= */
#define A64_COND_LT	AARCH64_INSN_COND_LT /* signed < */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)

/* Unconditional branch (immediate) */
#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
	AARCH64_INSN_BRANCH_##type)
#define A64_B(imm26)  A64_BRANCH((imm26) << 2, NOLINK)
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)

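/*
 * Illustrative usage sketch, not part of the original header: the imm19 and
 * imm26 operands above are offsets counted in instructions, and the << 2 in
 * each wrapper turns them into the byte offsets the insn generators expect.
 * Assuming the emit(insn, ctx) helper from bpf_jit_comp.c, and with src/dst
 * standing for already-mapped A64 registers, skipping the next instruction
 * when a register is zero could look like:
 *
 *	emit(A64_CBZ(1, src, 2), ctx);		// if (src == 0) skip next insn
 *	emit(A64_MOV(1, dst, src), ctx);	// runs only when src != 0
 */
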
/* Unconditional branch (register) */
#define A64_BR(Rn)  aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)

/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
	aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_STRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm)  A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)

/* Load/store register (immediate offset) */
#define A64_LS_IMM(Rt, Rn, imm, size, type) \
	aarch64_insn_gen_load_store_imm(Rt, Rn, imm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_IMM_OFFSET)
#define A64_STRBI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 8, STORE)
#define A64_LDRBI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
#define A64_STRHI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 16, STORE)
#define A64_LDRHI(Wt, Xn, imm)  A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
#define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
#define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
#define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
#define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)

/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
	aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
		AARCH64_INSN_VARIANT_64BIT, \
		AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn)  A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)

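/*
 * Illustrative usage sketch, not part of the original header: a prologue
 * built from these wrappers (assuming the emit() helper and struct jit_ctx
 * from bpf_jit_comp.c) would save the frame pointer and link register and
 * then point the frame pointer at the new frame:
 *
 *	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
 *	emit(A64_MOV(1, A64_FP, A64_SP), ctx);
 */
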
/* Load/store exclusive */
#define A64_SIZE(sf) \
	((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
#define A64_LSX(sf, Rt, Rn, Rs, type) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
				       AARCH64_INSN_LDST_##type)
/* Rt = [Rn]; (atomic) */
#define A64_LDXR(sf, Rt, Rn) \
	A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
/* [Rn] = Rt (store release); (atomic) Rs = [state] */
#define A64_STLXR(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
				       AARCH64_INSN_LDST_STORE_REL_EX)

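/*
 * Illustrative usage sketch, not part of the original header: on cores
 * without LSE atomics an atomic add can be emitted as a load/store-exclusive
 * retry loop (assuming the emit() helper from bpf_jit_comp.c, with tmp and
 * state standing for scratch registers); the store-exclusive writes 0 to the
 * status register on success and non-zero when the loop must retry:
 *
 *	emit(A64_LDXR(is64, tmp, addr), ctx);		// tmp = [addr]
 *	emit(A64_ADD(is64, tmp, tmp, src), ctx);	// tmp += src
 *	emit(A64_STXR(is64, tmp, addr, state), ctx);	// [addr] = tmp, state = status
 *	emit(A64_CBNZ(0, state, -3), ctx);		// retry from the LDXR on failure
 */
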
/*
 * LSE atomics
 *
 * ST{ADD,CLR,SET,EOR} is simply encoded as an alias for
 * LD{ADD,CLR,SET,EOR} with XZR as the destination register.
 */
#define A64_ST_OP(sf, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_NONE)
/* [Rn] <op>= Rs */
#define A64_STADD(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, ADD)
#define A64_STCLR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, CLR)
#define A64_STEOR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, EOR)
#define A64_STSET(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, SET)

#define A64_LD_OP_AL(sf, Rt, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(Rt, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_ACQREL)
/* Rt = [Rn] (load acquire); [Rn] <op>= Rs (store release) */
#define A64_LDADDAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, ADD)
#define A64_LDCLRAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, CLR)
#define A64_LDEORAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, EOR)
#define A64_LDSETAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SET)
/* Rt = [Rn] (load acquire); [Rn] = Rs (store release) */
#define A64_SWPAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SWP)
/* Rs = CAS(Rn, Rs, Rt) (load acquire & store release) */
#define A64_CASAL(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_cas(Rt, Rn, Rs, A64_SIZE(sf), \
		AARCH64_INSN_MEM_ORDER_ACQREL)

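/*
 * Illustrative usage sketch, not part of the original header: with LSE, a
 * plain atomic add (no fetch of the old value) collapses to a single STADD,
 * and the fetching variant maps to LDADDAL, which also returns the previous
 * memory value in Rt (assuming the emit() helper from bpf_jit_comp.c):
 *
 *	emit(A64_STADD(is64, addr, src), ctx);		// [addr] += src
 *	emit(A64_LDADDAL(is64, src, addr, src), ctx);	// fetch-and-add; old value lands in src
 */
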
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
#define A64_ADDS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
#define A64_SUBS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)
/* Rn + imm12; set condition flags */
#define A64_CMN_I(sf, Rn, imm12) A64_ADDS_I(sf, A64_ZR, Rn, imm12)
/* Rn - imm12; set condition flags */
#define A64_CMP_I(sf, Rn, imm12) A64_SUBS_I(sf, A64_ZR, Rn, imm12)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)

/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)

/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({	\
	int sz = (sf) ? 64 : 32;	\
	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)

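/*
 * Illustrative note, not part of the original header: A64_LSL encodes the
 * architectural alias LSL (immediate) == UBFM with immr = (-shift) mod size
 * and imms = size - 1 - shift.  For example, A64_LSL(1, Rd, Rn, 8) expands
 * to A64_UBFM(1, Rd, Rn, 56, 55), i.e. "LSL Xd, Xn, #8" is encoded as
 * "UBFM Xd, Xn, #56, #55".
 */
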
/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)

/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
	aarch64_insn_gen_movewide(Rd, imm16, shift, \
		A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
 * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
 * Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)

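/*
 * Illustrative usage sketch, not part of the original header: an arbitrary
 * 64-bit constant can be materialised 16 bits at a time with these wrappers
 * (assuming the emit() helper from bpf_jit_comp.c), e.g. for 0x1234567800ff:
 *
 *	emit(A64_MOVZ(1, dst, 0x00ff, 0), ctx);		// dst  = 0x00ff
 *	emit(A64_MOVK(1, dst, 0x5678, 16), ctx);	// dst |= 0x5678 << 16
 *	emit(A64_MOVK(1, dst, 0x1234, 32), ctx);	// dst |= 0x1234 << 32
 */
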
/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)

/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn)     A64_DATA1(1, Rd, Rn, REVERSE_64)

/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Ra - Rn * Rm */
#define A64_MSUB(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MSUB)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)

/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
#define A64_MVN(sf, Rd, Rm)  \
	A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)

/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
	u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
	aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
		A64_VARIANT(sf), Rn, Rd, imm64); \
})
/* Rd = Rn OP imm */
#define A64_AND_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
#define A64_ORR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
#define A64_EOR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
/* Rn & imm; set condition flags */
#define A64_TST_I(sf, Rn, imm) A64_ANDS_I(sf, A64_ZR, Rn, imm)

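/*
 * Illustrative note, not part of the original header: only values that can
 * be encoded as an AArch64 bitmask immediate (a rotated, repeating run of
 * set bits) are representable by these wrappers; other constants have to be
 * moved into a register first.  Aligning a pointer down to 16 bytes, for
 * instance, fits the encoding (assuming the emit() helper from
 * bpf_jit_comp.c):
 *
 *	emit(A64_AND_I(1, dst, src, ~0xfULL), ctx);	// dst = src & ~15
 */
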
/* HINTs */
#define A64_HINT(x) aarch64_insn_gen_hint(x)

#define A64_PACIASP A64_HINT(AARCH64_INSN_HINT_PACIASP)
#define A64_AUTIASP A64_HINT(AARCH64_INSN_HINT_AUTIASP)

/* BTI */
#define A64_BTI_C  A64_HINT(AARCH64_INSN_HINT_BTIC)
#define A64_BTI_J  A64_HINT(AARCH64_INSN_HINT_BTIJ)
#define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)

/* DMB */
#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)

#endif /* _BPF_JIT_H */