author    David S. Miller <davem@davemloft.net>  2017-05-31 19:29:48 -0400
committer David S. Miller <davem@davemloft.net>  2017-05-31 19:29:48 -0400
commit    6c21a2a65ded27ce49847339efe4e1373a2319e5 (patch)
tree      7578aae05af40cd063fa3cd8679effc13a28863b /include
parent    d2e0ef493ad953048bdf562b06cc4330b3fd9fb7 (diff)
parent    2960ae48c4636778761610dd49187691c3774465 (diff)
Merge branch 'bpf-stack-tracker'
Alexei Starovoitov says:

====================
bpf: stack depth tracking

Introduce tracking of bpf program stack depth in the verifier and use
that info to reduce bpf program stack consumption in the interpreter
and x64 JIT. Other JITs can take advantage of it as well in the future.

Most programs consume very little stack, so this is a good optimization
in general, and it is the first step toward bpf-to-bpf function calls.

Also use an internal opcode for the bpf_tail_call() marking to make
clear that the jmp|call|x opcode is not uapi and may be used for an
actual indirect call opcode in the future.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
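To make the stack-consumption point concrete, here is a minimal,
hypothetical C sketch (not code from this series; the function name and
the 16-byte rounding policy are assumptions) of how a JIT prologue could
size its stack frame from the verifier-computed depth instead of always
reserving the worst-case MAX_BPF_STACK:

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>

/* Hypothetical example: pick the frame size for a program's JIT
 * prologue. Without depth tracking a JIT must assume the worst case
 * (MAX_BPF_STACK, 512 bytes); with aux->stack_depth it can reserve
 * only what the verifier proved the program actually uses.
 */
static u32 jit_frame_size_example(const struct bpf_prog *prog)
{
	/* Round up for ABI stack alignment; 16 bytes is typical on x86-64. */
	return round_up(prog->aux->stack_depth, 16);
}

A program that spills only a couple of u64 slots would then reserve 16
bytes of stack rather than 512.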
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf.h     1
-rw-r--r--  include/linux/filter.h  3
2 files changed, 4 insertions, 0 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 6bb38d76faf4..fcc80ca11045 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -171,6 +171,7 @@ struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
+ u32 stack_depth;
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
const struct bpf_verifier_ops *ops;
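As a hedged illustration of how the verifier side could populate this
new field (a simplified sketch with a hypothetical helper name, not the
verifier's actual code): BPF stack slots live at negative offsets from
the frame pointer, so recording the deepest offset seen while checking
stack accesses yields the program's stack depth.

#include <linux/bpf.h>
#include <linux/filter.h>

/* Simplified sketch: @off is the offset of a checked stack access
 * relative to BPF_REG_FP; stack slots sit below the frame pointer,
 * i.e. at negative offsets, so -off is the depth of this access.
 */
static void note_stack_access_example(struct bpf_prog *prog, s32 off)
{
	if (off < 0 && -off > prog->aux->stack_depth)
		prog->aux->stack_depth = -off;
}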
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 62d948f80730..a20ba40fcb73 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,6 +57,9 @@ struct bpf_prog_aux;
#define BPF_REG_AX MAX_BPF_REG
#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)

+/* unused opcode to mark special call to bpf_tail_call() helper */
+#define BPF_TAIL_CALL 0xf0
+
/* As per nm, we expose JITed images as text (code) section for
* kallsyms. That way, tools like perf can find it to match
* addresses.
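For illustration, a simplified sketch of the kind of fixup that can now
mark tail calls with this internal opcode (the helper name is
hypothetical; the real rewrite lives in the verifier's fixup pass and
does additional bookkeeping):

#include <linux/bpf.h>
#include <linux/filter.h>

/* Hypothetical fixup sketch: rewrite a generic call to the
 * bpf_tail_call() helper into the internal BPF_TAIL_CALL marker so the
 * interpreter and JITs can emit a dedicated tail-call sequence instead
 * of an ordinary helper call.
 */
static void mark_tail_call_example(struct bpf_insn *insn)
{
	if (insn->code == (BPF_JMP | BPF_CALL) &&
	    insn->imm == BPF_FUNC_tail_call) {
		insn->imm = 0;
		insn->code = BPF_JMP | BPF_TAIL_CALL;
	}
}

Because 0xf0 is not a uapi opcode, userspace never sees BPF_TAIL_CALL;
it exists only between the verifier and the interpreter/JIT backends.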