cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atomic_cmpxchg.c (6945B)


{
	"atomic compare-and-exchange smoketest - 64bit",
	.insns = {
		/* val = 3; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
		/* old = atomic_cmpxchg(&val, 2, 4); */
		BPF_MOV64_IMM(BPF_REG_1, 4),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (old != 3) exit(2); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* if (val != 3) exit(3); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* old = atomic_cmpxchg(&val, 3, 4); */
		BPF_MOV64_IMM(BPF_REG_1, 4),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (old != 3) exit(4); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 4),
		BPF_EXIT_INSN(),
		/* if (val != 4) exit(5); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
		BPF_MOV64_IMM(BPF_REG_0, 5),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"atomic compare-and-exchange smoketest - 32bit",
	.insns = {
		/* val = 3; */
		BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
		/* old = atomic_cmpxchg(&val, 2, 4); */
		BPF_MOV32_IMM(BPF_REG_1, 4),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
		/* if (old != 3) exit(2); */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* if (val != 3) exit(3); */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* old = atomic_cmpxchg(&val, 3, 4); */
		BPF_MOV32_IMM(BPF_REG_1, 4),
		BPF_MOV32_IMM(BPF_REG_0, 3),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
		/* if (old != 3) exit(4); */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 4),
		BPF_EXIT_INSN(),
		/* if (val != 4) exit(5); */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
		BPF_MOV32_IMM(BPF_REG_0, 5),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"Can't use cmpxchg on uninit src reg",
	.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
		BPF_MOV64_IMM(BPF_REG_0, 3),
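		/* r2 supplies the new value but is never initialized,
		 * so the verifier must reject the read ("!read_ok").
		 */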
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "!read_ok",
},
{
	"Can't use cmpxchg on uninit memory",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_MOV64_IMM(BPF_REG_2, 4),
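		/* the stack slot at fp-8 is never written before the cmpxchg,
		 * so this is a read of uninitialized stack memory.
		 */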
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid read from stack",
},
{
	"BPF_W cmpxchg should zero top 32 bits",
	.insns = {
		/* r0 = U64_MAX; */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
		/* u64 val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r0 = (u32)atomic_cmpxchg((u32 *)&val, r0, 1); */
		BPF_MOV32_IMM(BPF_REG_1, 1),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* r1 = 0x00000000FFFFFFFFull; */
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
		/* if (r0 != r1) exit(1); */
		BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 2),
		BPF_MOV32_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"Dest pointer in r0 - fail",
	.insns = {
		/* val = 0; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, 1); */
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (r0 != 0) exit(1); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
{
	"Dest pointer in r0 - succeed",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r0 = atomic_cmpxchg(&val, r0, 0); */
		BPF_MOV64_IMM(BPF_REG_1, 0),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
{
	"Dest pointer in r0 - succeed, check 2",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
{
	"Dest pointer in r0 - succeed, check 3",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
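		/* the pointer was spilled as a 64-bit value, so this 32-bit
		 * access is rejected as an invalid-size register fill
		 */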
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid size of register fill",
	.errstr_unpriv = "R0 leaks addr into mem",
},
{
	"Dest pointer in r0 - succeed, check 4",
	.insns = {
		/* r0 = &val */
		BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r10 */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R10 partial copy of pointer",
},
{
	"Dest pointer in r0 - succeed, check 5",
	.insns = {
		/* r0 = &val */
		BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
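		/* r0 now holds a 32-bit scalar, not a pointer, so the
		 * load through it below must be rejected
		 */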
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R0 invalid mem access",
	.errstr_unpriv = "R10 partial copy of pointer",
},