| author | David S. Miller <davem@davemloft.net> | 2014-05-12 00:25:51 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2014-05-12 00:25:51 -0400 |
| commit | bb399fbd223f478988ede5e6f2dfcc6750bf1f05 | |
| tree | aa3f671c1be28e93a6951e51fc669fb32520d89b /include/linux | |
| parent | 05ab2dae650e09add1c5295392b5516704c03a4b | |
| parent | 9def624afdf2a8122eed5f2beec7448513c9a703 | |
Merge branch 'filter-next'
Alexei Starovoitov says:
====================
BPF testsuite and cleanup
This patchset adds a BPF testsuite and improves the readability of the
classic-to-internal BPF converter.
The testsuite helped find the 'negative offset bug' in the x64 JIT that was
fixed by commit fdfaf64e ("x86: bpf_jit: support negative offsets").
It should be very useful to developers of classic and internal BPF JIT
compilers, and it also serves as a performance benchmark.
x86_64/i386 pass all tests with and without the JIT. The arm32 JIT fails the
negative offset tests, which it does not support.
The internal BPF tests are much larger than the classic tests in order to
cover different combinations of registers. The negative tests check the
correctness of the classic BPF verifier, which must reject them.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
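To make the "negative offset" and negative-test points in the message above concrete, here is a sketch of my own (illustrative, not code from the patchset) using the long-standing classic BPF UAPI from linux/filter.h; BPF_STMT, SKF_NET_OFF, and struct sock_filter are the existing definitions, while the array names are made up for the example:

/* Hypothetical classic BPF filter: load one byte at the start of the
 * network header via the negative SKF_NET_OFF ancillary offset, then
 * accept the packet. Loads like this are what the negative offset
 * tests exercise, and what the arm32 JIT does not support. */
struct sock_filter negative_off[] = {
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
	BPF_STMT(BPF_RET | BPF_K, 0xffff),
};

/* Hypothetical negative test: the classic checker must reject a
 * division by the constant zero, so attaching this program is
 * expected to fail rather than run. */
struct sock_filter must_reject[] = {
	BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
	BPF_STMT(BPF_RET | BPF_K, 0),
};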
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/filter.h | 51 |
1 file changed, 51 insertions, 0 deletions
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ed1efab10b8f..4457b383961c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -79,6 +79,57 @@ enum {
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
+/* bpf_add|sub|...: a += x, bpf_mov: a = x */
+#define BPF_ALU64_REG(op, a, x) \
+	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_X, a, x, 0, 0})
+#define BPF_ALU32_REG(op, a, x) \
+	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_X, a, x, 0, 0})
+
+/* bpf_add|sub|...: a += imm, bpf_mov: a = imm */
+#define BPF_ALU64_IMM(op, a, imm) \
+	((struct sock_filter_int) {BPF_ALU64|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+#define BPF_ALU32_IMM(op, a, imm) \
+	((struct sock_filter_int) {BPF_ALU|BPF_OP(op)|BPF_K, a, 0, 0, imm})
+
+/* R0 = *(uint *) (skb->data + off) */
+#define BPF_LD_ABS(size, off) \
+	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_ABS, 0, 0, 0, off})
+
+/* R0 = *(uint *) (skb->data + x + off) */
+#define BPF_LD_IND(size, x, off) \
+	((struct sock_filter_int) {BPF_LD|BPF_SIZE(size)|BPF_IND, 0, x, 0, off})
+
+/* a = *(uint *) (x + off) */
+#define BPF_LDX_MEM(sz, a, x, off) \
+	((struct sock_filter_int) {BPF_LDX|BPF_SIZE(sz)|BPF_MEM, a, x, off, 0})
+
+/* if (a 'op' x) goto pc+off */
+#define BPF_JMP_REG(op, a, x, off) \
+	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_X, a, x, off, 0})
+
+/* if (a 'op' imm) goto pc+off */
+#define BPF_JMP_IMM(op, a, imm, off) \
+	((struct sock_filter_int) {BPF_JMP|BPF_OP(op)|BPF_K, a, 0, off, imm})
+
+#define BPF_EXIT_INSN() \
+	((struct sock_filter_int) {BPF_JMP|BPF_EXIT, 0, 0, 0, 0})
+
+static inline int size_to_bpf(int size)
+{
+	switch (size) {
+	case 1:
+		return BPF_B;
+	case 2:
+		return BPF_H;
+	case 4:
+		return BPF_W;
+	case 8:
+		return BPF_DW;
+	default:
+		return -EINVAL;
+	}
+}
+
 /* Macro to invoke filter function. */
 #define SK_RUN_FILTER(filter, ctx) \
 	(*filter->bpf_func)(ctx, filter->insnsi)
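To show how the new helpers compose, here is a minimal sketch (an illustration, not part of the patch) that builds a small internal BPF program; it assumes only the struct sock_filter_int layout {code, a_reg, x_reg, off, imm} and the BPF_MOV/BPF_ADD/BPF_JEQ opcodes already defined in filter.h at this point in the tree:

/* Hypothetical example program: compute R0 = 1 + 2, verify the
 * result with a conditional jump, and return R0. Register numbers
 * are the internal BPF registers R0..R10; the jump offset counts
 * instructions following the branch. */
struct sock_filter_int prog[] = {
	BPF_ALU64_IMM(BPF_MOV, 0, 1),	/* R0 = 1 */
	BPF_ALU64_IMM(BPF_ADD, 0, 2),	/* R0 += 2 */
	BPF_JMP_IMM(BPF_JEQ, 0, 3, 1),	/* if (R0 == 3) goto pc+1 */
	BPF_ALU64_IMM(BPF_MOV, 0, 0),	/* R0 = 0 (mismatch path) */
	BPF_EXIT_INSN(),		/* return R0 */
};

Once such an instruction array is wrapped in a struct sk_filter, the SK_RUN_FILTER() macro shown at the end of the hunk invokes it through filter->bpf_func, which points either at the interpreter or at JIT-compiled code.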
