Diffstat (limited to 'tools/include/uapi/linux/bpf.h')
 tools/include/uapi/linux/bpf.h | 26 +++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 791f31dd0abe..3e9785f1064a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1629,7 +1629,7 @@ union bpf_attr {
* u32 bpf_get_smp_processor_id(void)
* Description
* Get the SMP (symmetric multiprocessing) processor id. Note that
- * all programs run with preemption disabled, which means that the
+ * all programs run with migration disabled, which means that the
* SMP processor id is stable during all the execution of the
* program.
* Return
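
For context, a minimal usage sketch (not part of this patch) of why the stable processor id matters: because the program runs with migration disabled, the id read once at the top can safely index a per-CPU slot for the rest of the invocation. The map layout, section name and CPU bound below are illustrative assumptions.

/* Sketch only: count events per CPU, relying on the stable SMP processor id. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 128);	/* assumed upper bound on CPU count */
	__type(key, __u32);
	__type(value, __u64);
} hits SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int count_per_cpu(void *ctx)
{
	__u32 cpu = bpf_get_smp_processor_id();
	__u64 *val = bpf_map_lookup_elem(&hits, &cpu);

	/* Migration stays disabled for the whole run, so 'cpu' remains valid here. */
	if (val)
		__sync_fetch_and_add(val, 1);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
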
@@ -4877,6 +4877,27 @@ union bpf_attr {
* Get the struct pt_regs associated with **task**.
* Return
* A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ * Description
+ * Get branch trace from hardware engines like Intel LBR. The
+ * hardware engine is stopped shortly after the helper is
+ * called. Therefore, the user needs to filter branch entries
+ * based on the actual use case. To capture branch trace
+ * before the trigger point of the BPF program, the helper
+ * should be called at the beginning of the BPF program.
+ *
+ * The data is stored as **struct perf_branch_entry** records in
+ * the output buffer *entries*. *size* is the size of *entries*
+ * in bytes. *flags* is reserved for now and must be zero.
+ *
+ * Return
+ * On success, number of bytes written to *entries*. On error, a
+ * negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-ENOENT** if the architecture does not support branch records.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
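
A minimal usage sketch for the new helper, not part of the patch: an fentry program that calls bpf_get_branch_snapshot() as its first action, as the documentation above recommends, so the captured LBR entries reflect the trigger point rather than the program's own branches. The attach target, buffer depth and global-variable layout are assumptions, and the helper declaration requires a libbpf recent enough to know about it.

/* Sketch only: snapshot branch records at function entry. */
#include <linux/bpf.h>
#include <linux/perf_event.h>	/* struct perf_branch_entry */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define MAX_LBR_ENTRIES 32	/* assumed; real depth depends on the CPU */

struct perf_branch_entry entries[MAX_LBR_ENTRIES];
long snapshot_len;		/* bytes written, or a negative error */

SEC("fentry/ksys_write")
int BPF_PROG(snapshot_branches)
{
	/* Call first so the captured branches reflect the trigger point,
	 * not this program's own control flow. flags must be zero. */
	snapshot_len = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
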
@@ -5055,6 +5076,7 @@ union bpf_attr {
FN(get_func_ip), \
FN(get_attach_cookie), \
FN(task_pt_regs), \
+ FN(get_branch_snapshot), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
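
As a small illustration of the comment above (not from the patch): when hand-assembling instructions, the helper is chosen by placing its enum bpf_func_id value in the imm field of a BPF_CALL instruction. The fragment below is only an example instruction, not a complete loadable program.

#include <linux/bpf.h>

/* BPF_CALL instruction: r0 = bpf_get_smp_processor_id().
 * The helper is selected purely by the value in .imm. */
static const struct bpf_insn call_get_cpu = {
	.code    = BPF_JMP | BPF_CALL,
	.dst_reg = 0,
	.src_reg = 0,
	.off     = 0,
	.imm     = BPF_FUNC_get_smp_processor_id,
};
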
@@ -5284,6 +5306,8 @@ struct __sk_buff {
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
__u32 gso_size;
+ __u32 :32; /* Padding, future use. */
+ __u64 hwtstamp;
};
struct bpf_tunnel_key {
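
Finally, a minimal sketch (again, not part of the patch) of a TC program reading the new hwtstamp field. Whether a given program type may read the field is decided by the verifier's context-access checks; the section name and output below are illustrative.

/* Sketch only: log the hardware RX timestamp, if the driver provided one. */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")	/* older libbpf versions may expect SEC("classifier") */
int log_hwtstamp(struct __sk_buff *skb)
{
	/* hwtstamp is 0 when no hardware timestamp was recorded. */
	__u64 hwts = skb->hwtstamp;

	if (hwts)
		bpf_printk("hw timestamp: %llu ns", hwts);
	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";
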