cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

test_run.c (39198B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/* Copyright (c) 2017 Facebook
      3 */
      4#include <linux/bpf.h>
      5#include <linux/btf.h>
      6#include <linux/btf_ids.h>
      7#include <linux/slab.h>
      8#include <linux/init.h>
      9#include <linux/vmalloc.h>
     10#include <linux/etherdevice.h>
     11#include <linux/filter.h>
     12#include <linux/rcupdate_trace.h>
     13#include <linux/sched/signal.h>
     14#include <net/bpf_sk_storage.h>
     15#include <net/sock.h>
     16#include <net/tcp.h>
     17#include <net/net_namespace.h>
     18#include <net/page_pool.h>
     19#include <linux/error-injection.h>
     20#include <linux/smp.h>
     21#include <linux/sock_diag.h>
     22#include <net/xdp.h>
     23
     24#define CREATE_TRACE_POINTS
     25#include <trace/events/bpf_test_run.h>
     26
     27struct bpf_test_timer {
     28	enum { NO_PREEMPT, NO_MIGRATE } mode;
     29	u32 i;
     30	u64 time_start, time_spent;
     31};
     32
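        /* Helpers for timing test runs: enter/leave bracket the measured
         * region under rcu_read_lock(), with either preemption or migration
         * disabled depending on the selected mode.
         */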
     33static void bpf_test_timer_enter(struct bpf_test_timer *t)
     34	__acquires(rcu)
     35{
     36	rcu_read_lock();
     37	if (t->mode == NO_PREEMPT)
     38		preempt_disable();
     39	else
     40		migrate_disable();
     41
     42	t->time_start = ktime_get_ns();
     43}
     44
     45static void bpf_test_timer_leave(struct bpf_test_timer *t)
     46	__releases(rcu)
     47{
     48	t->time_start = 0;
     49
     50	if (t->mode == NO_PREEMPT)
     51		preempt_enable();
     52	else
     53		migrate_enable();
     54	rcu_read_unlock();
     55}
     56
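        /* Account for the iterations just completed. Stop (returning false)
         * once 'repeat' runs are done, reporting the average duration per
         * run, or when a signal is pending; otherwise reschedule if needed
         * and continue.
         */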
     57static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
     58				    u32 repeat, int *err, u32 *duration)
     59	__must_hold(rcu)
     60{
     61	t->i += iterations;
     62	if (t->i >= repeat) {
     63		/* We're done. */
     64		t->time_spent += ktime_get_ns() - t->time_start;
     65		do_div(t->time_spent, t->i);
     66		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
     67		*err = 0;
     68		goto reset;
     69	}
     70
     71	if (signal_pending(current)) {
     72		/* During iteration: we've been cancelled, abort. */
     73		*err = -EINTR;
     74		goto reset;
     75	}
     76
     77	if (need_resched()) {
     78		/* During iteration: we need to reschedule between runs. */
     79		t->time_spent += ktime_get_ns() - t->time_start;
     80		bpf_test_timer_leave(t);
     81		cond_resched();
     82		bpf_test_timer_enter(t);
     83	}
     84
     85	/* Do another round. */
     86	return true;
     87
     88reset:
     89	t->i = 0;
     90	return false;
     91}
     92
     93/* We put this struct at the head of each page with a context and frame
     94 * initialised when the page is allocated, so we don't have to do this on each
     95 * repetition of the test run.
     96 */
     97struct xdp_page_head {
     98	struct xdp_buff orig_ctx;
     99	struct xdp_buff ctx;
    100	struct xdp_frame frm;
    101	u8 data[];
    102};
    103
    104struct xdp_test_data {
    105	struct xdp_buff *orig_ctx;
    106	struct xdp_rxq_info rxq;
    107	struct net_device *dev;
    108	struct page_pool *pp;
    109	struct xdp_frame **frames;
    110	struct sk_buff **skbs;
    111	struct xdp_mem_info mem;
    112	u32 batch_size;
    113	u32 frame_cnt;
    114};
    115
    116#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
    117#define TEST_XDP_MAX_BATCH 256
    118
    119static void xdp_test_run_init_page(struct page *page, void *arg)
    120{
    121	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
    122	struct xdp_buff *new_ctx, *orig_ctx;
    123	u32 headroom = XDP_PACKET_HEADROOM;
    124	struct xdp_test_data *xdp = arg;
    125	size_t frm_len, meta_len;
    126	struct xdp_frame *frm;
    127	void *data;
    128
    129	orig_ctx = xdp->orig_ctx;
    130	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
    131	meta_len = orig_ctx->data - orig_ctx->data_meta;
    132	headroom -= meta_len;
    133
    134	new_ctx = &head->ctx;
    135	frm = &head->frm;
    136	data = &head->data;
    137	memcpy(data + headroom, orig_ctx->data_meta, frm_len);
    138
    139	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
    140	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
    141	new_ctx->data = new_ctx->data_meta + meta_len;
    142
    143	xdp_update_frame_from_buff(new_ctx, frm);
    144	frm->mem = new_ctx->rxq->mem;
    145
    146	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
    147}
    148
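        /* Allocate the frame/skb pointer arrays and a page_pool whose pages
         * are pre-initialised with copies of the original context, then
         * register a fake RX queue on the original device backed by that
         * pool.
         */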
    149static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
    150{
    151	struct page_pool *pp;
    152	int err = -ENOMEM;
    153	struct page_pool_params pp_params = {
    154		.order = 0,
    155		.flags = 0,
    156		.pool_size = xdp->batch_size,
    157		.nid = NUMA_NO_NODE,
    158		.init_callback = xdp_test_run_init_page,
    159		.init_arg = xdp,
    160	};
    161
    162	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
    163	if (!xdp->frames)
    164		return -ENOMEM;
    165
    166	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
    167	if (!xdp->skbs)
    168		goto err_skbs;
    169
    170	pp = page_pool_create(&pp_params);
    171	if (IS_ERR(pp)) {
    172		err = PTR_ERR(pp);
    173		goto err_pp;
    174	}
    175
    176	/* will copy 'mem.id' into pp->xdp_mem_id */
    177	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
    178	if (err)
    179		goto err_mmodel;
    180
    181	xdp->pp = pp;
    182
    183	/* We create a 'fake' RXQ referencing the original dev, but with an
    184	 * xdp_mem_info pointing to our page_pool
    185	 */
    186	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
    187	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
    188	xdp->rxq.mem.id = pp->xdp_mem_id;
    189	xdp->dev = orig_ctx->rxq->dev;
    190	xdp->orig_ctx = orig_ctx;
    191
    192	return 0;
    193
    194err_mmodel:
    195	page_pool_destroy(pp);
    196err_pp:
    197	kvfree(xdp->skbs);
    198err_skbs:
    199	kvfree(xdp->frames);
    200	return err;
    201}
    202
    203static void xdp_test_run_teardown(struct xdp_test_data *xdp)
    204{
    205	xdp_unreg_mem_model(&xdp->mem);
    206	page_pool_destroy(xdp->pp);
     207	kvfree(xdp->frames);
     208	kvfree(xdp->skbs);
    209}
    210
    211static bool ctx_was_changed(struct xdp_page_head *head)
    212{
    213	return head->orig_ctx.data != head->ctx.data ||
    214		head->orig_ctx.data_meta != head->ctx.data_meta ||
    215		head->orig_ctx.data_end != head->ctx.data_end;
    216}
    217
    218static void reset_ctx(struct xdp_page_head *head)
    219{
    220	if (likely(!ctx_was_changed(head)))
    221		return;
    222
    223	head->ctx.data = head->orig_ctx.data;
    224	head->ctx.data_meta = head->orig_ctx.data_meta;
    225	head->ctx.data_end = head->orig_ctx.data_end;
    226	xdp_update_frame_from_buff(&head->ctx, &head->frm);
    227}
    228
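        /* Build skbs for the frames that returned XDP_PASS (bulk-allocating
         * the skb heads) and hand them to the stack via
         * netif_receive_skb_list().
         */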
    229static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
    230			   struct sk_buff **skbs,
    231			   struct net_device *dev)
    232{
    233	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
    234	int i, n;
    235	LIST_HEAD(list);
    236
    237	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
    238	if (unlikely(n == 0)) {
    239		for (i = 0; i < nframes; i++)
    240			xdp_return_frame(frames[i]);
    241		return -ENOMEM;
    242	}
    243
    244	for (i = 0; i < nframes; i++) {
    245		struct xdp_frame *xdpf = frames[i];
    246		struct sk_buff *skb = skbs[i];
    247
    248		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
    249		if (!skb) {
    250			xdp_return_frame(xdpf);
    251			continue;
    252		}
    253
    254		list_add_tail(&skb->list, &list);
    255	}
    256	netif_receive_skb_list(&list);
    257
    258	return 0;
    259}
    260
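        /* Run one batch: allocate up to batch_sz pages from the pool, run the
         * program on each frame and act on the verdict - XDP_TX becomes a
         * redirect to the same ifindex, XDP_PASS frames are collected for
         * delivery to the stack, everything else returns the buffer.
         */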
    261static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
    262			      u32 repeat)
    263{
    264	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
    265	int err = 0, act, ret, i, nframes = 0, batch_sz;
    266	struct xdp_frame **frames = xdp->frames;
    267	struct xdp_page_head *head;
    268	struct xdp_frame *frm;
    269	bool redirect = false;
    270	struct xdp_buff *ctx;
    271	struct page *page;
    272
    273	batch_sz = min_t(u32, repeat, xdp->batch_size);
    274
    275	local_bh_disable();
    276	xdp_set_return_frame_no_direct();
    277
    278	for (i = 0; i < batch_sz; i++) {
    279		page = page_pool_dev_alloc_pages(xdp->pp);
    280		if (!page) {
    281			err = -ENOMEM;
    282			goto out;
    283		}
    284
    285		head = phys_to_virt(page_to_phys(page));
    286		reset_ctx(head);
    287		ctx = &head->ctx;
    288		frm = &head->frm;
    289		xdp->frame_cnt++;
    290
    291		act = bpf_prog_run_xdp(prog, ctx);
    292
    293		/* if program changed pkt bounds we need to update the xdp_frame */
    294		if (unlikely(ctx_was_changed(head))) {
    295			ret = xdp_update_frame_from_buff(ctx, frm);
    296			if (ret) {
    297				xdp_return_buff(ctx);
    298				continue;
    299			}
    300		}
    301
    302		switch (act) {
    303		case XDP_TX:
    304			/* we can't do a real XDP_TX since we're not in the
    305			 * driver, so turn it into a REDIRECT back to the same
    306			 * index
    307			 */
    308			ri->tgt_index = xdp->dev->ifindex;
    309			ri->map_id = INT_MAX;
    310			ri->map_type = BPF_MAP_TYPE_UNSPEC;
    311			fallthrough;
    312		case XDP_REDIRECT:
    313			redirect = true;
    314			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
    315			if (ret)
    316				xdp_return_buff(ctx);
    317			break;
    318		case XDP_PASS:
    319			frames[nframes++] = frm;
    320			break;
    321		default:
    322			bpf_warn_invalid_xdp_action(NULL, prog, act);
    323			fallthrough;
    324		case XDP_DROP:
    325			xdp_return_buff(ctx);
    326			break;
    327		}
    328	}
    329
    330out:
    331	if (redirect)
    332		xdp_do_flush();
    333	if (nframes) {
    334		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
    335		if (ret)
    336			err = ret;
    337	}
    338
    339	xdp_clear_return_frame_no_direct();
    340	local_bh_enable();
    341	return err;
    342}
    343
    344static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
    345				 u32 repeat, u32 batch_size, u32 *time)
    346
    347{
    348	struct xdp_test_data xdp = { .batch_size = batch_size };
    349	struct bpf_test_timer t = { .mode = NO_MIGRATE };
    350	int ret;
    351
    352	if (!repeat)
    353		repeat = 1;
    354
    355	ret = xdp_test_run_setup(&xdp, ctx);
    356	if (ret)
    357		return ret;
    358
    359	bpf_test_timer_enter(&t);
    360	do {
    361		xdp.frame_cnt = 0;
    362		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
    363		if (unlikely(ret < 0))
    364			break;
    365	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
    366	bpf_test_timer_leave(&t);
    367
    368	xdp_test_run_teardown(&xdp);
    369	return ret;
    370}
    371
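        /* Common run loop for skb and XDP test runs: allocate per-type cgroup
         * storage and a run context, invoke the program 'repeat' times under
         * the test timer, and report the last return value plus the average
         * duration.
         */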
    372static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
    373			u32 *retval, u32 *time, bool xdp)
    374{
    375	struct bpf_prog_array_item item = {.prog = prog};
    376	struct bpf_run_ctx *old_ctx;
    377	struct bpf_cg_run_ctx run_ctx;
    378	struct bpf_test_timer t = { NO_MIGRATE };
    379	enum bpf_cgroup_storage_type stype;
    380	int ret;
    381
    382	for_each_cgroup_storage_type(stype) {
    383		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
    384		if (IS_ERR(item.cgroup_storage[stype])) {
    385			item.cgroup_storage[stype] = NULL;
    386			for_each_cgroup_storage_type(stype)
    387				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
    388			return -ENOMEM;
    389		}
    390	}
    391
    392	if (!repeat)
    393		repeat = 1;
    394
    395	bpf_test_timer_enter(&t);
    396	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
    397	do {
    398		run_ctx.prog_item = &item;
    399		if (xdp)
    400			*retval = bpf_prog_run_xdp(prog, ctx);
    401		else
    402			*retval = bpf_prog_run(prog, ctx);
    403	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
    404	bpf_reset_run_ctx(old_ctx);
    405	bpf_test_timer_leave(&t);
    406
    407	for_each_cgroup_storage_type(stype)
    408		bpf_cgroup_storage_free(item.cgroup_storage[stype]);
    409
    410	return ret;
    411}
    412
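        /* Copy the resulting packet data (linear part plus any frags), the
         * return value and the duration back to userspace; -ENOSPC indicates
         * the user buffer was too small for the full output.
         */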
    413static int bpf_test_finish(const union bpf_attr *kattr,
    414			   union bpf_attr __user *uattr, const void *data,
    415			   struct skb_shared_info *sinfo, u32 size,
    416			   u32 retval, u32 duration)
    417{
    418	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
    419	int err = -EFAULT;
    420	u32 copy_size = size;
    421
    422	/* Clamp copy if the user has provided a size hint, but copy the full
    423	 * buffer if not to retain old behaviour.
    424	 */
    425	if (kattr->test.data_size_out &&
    426	    copy_size > kattr->test.data_size_out) {
    427		copy_size = kattr->test.data_size_out;
    428		err = -ENOSPC;
    429	}
    430
    431	if (data_out) {
    432		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
    433
    434		if (len < 0) {
    435			err = -ENOSPC;
    436			goto out;
    437		}
    438
    439		if (copy_to_user(data_out, data, len))
    440			goto out;
    441
    442		if (sinfo) {
    443			int i, offset = len;
    444			u32 data_len;
    445
    446			for (i = 0; i < sinfo->nr_frags; i++) {
    447				skb_frag_t *frag = &sinfo->frags[i];
    448
    449				if (offset >= copy_size) {
    450					err = -ENOSPC;
    451					break;
    452				}
    453
    454				data_len = min_t(u32, copy_size - offset,
    455						 skb_frag_size(frag));
    456
    457				if (copy_to_user(data_out + offset,
    458						 skb_frag_address(frag),
    459						 data_len))
    460					goto out;
    461
    462				offset += data_len;
    463			}
    464		}
    465	}
    466
    467	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
    468		goto out;
    469	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
    470		goto out;
    471	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
    472		goto out;
    473	if (err != -ENOSPC)
    474		err = 0;
    475out:
    476	trace_bpf_test_finish(&err);
    477	return err;
    478}
    479
     480/* Integer types of various sizes and pointer combinations cover a variety of
     481 * architecture-dependent calling conventions. Arities of 7+ arguments can be
     482 * supported in the future.
     483 */
    484__diag_push();
    485__diag_ignore_all("-Wmissing-prototypes",
    486		  "Global functions as their definitions will be in vmlinux BTF");
    487int noinline bpf_fentry_test1(int a)
    488{
    489	return a + 1;
    490}
    491EXPORT_SYMBOL_GPL(bpf_fentry_test1);
    492ALLOW_ERROR_INJECTION(bpf_fentry_test1, ERRNO);
    493
    494int noinline bpf_fentry_test2(int a, u64 b)
    495{
    496	return a + b;
    497}
    498
    499int noinline bpf_fentry_test3(char a, int b, u64 c)
    500{
    501	return a + b + c;
    502}
    503
    504int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
    505{
    506	return (long)a + b + c + d;
    507}
    508
    509int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
    510{
    511	return a + (long)b + c + d + e;
    512}
    513
    514int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
    515{
    516	return a + (long)b + c + d + (long)e + f;
    517}
    518
    519struct bpf_fentry_test_t {
    520	struct bpf_fentry_test_t *a;
    521};
    522
    523int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
    524{
    525	return (long)arg;
    526}
    527
    528int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
    529{
    530	return (long)arg->a;
    531}
    532
    533int noinline bpf_modify_return_test(int a, int *b)
    534{
    535	*b += 1;
    536	return a + *b;
    537}
    538
    539u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
    540{
    541	return a + b + c + d;
    542}
    543
    544int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
    545{
    546	return a + b;
    547}
    548
    549struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
    550{
    551	return sk;
    552}
    553
    554struct prog_test_member1 {
    555	int a;
    556};
    557
    558struct prog_test_member {
    559	struct prog_test_member1 m;
    560	int c;
    561};
    562
    563struct prog_test_ref_kfunc {
    564	int a;
    565	int b;
    566	struct prog_test_member memb;
    567	struct prog_test_ref_kfunc *next;
    568	refcount_t cnt;
    569};
    570
    571static struct prog_test_ref_kfunc prog_test_struct = {
    572	.a = 42,
    573	.b = 108,
    574	.next = &prog_test_struct,
    575	.cnt = REFCOUNT_INIT(1),
    576};
    577
    578noinline struct prog_test_ref_kfunc *
    579bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
    580{
    581	refcount_inc(&prog_test_struct.cnt);
    582	return &prog_test_struct;
    583}
    584
    585noinline struct prog_test_member *
    586bpf_kfunc_call_memb_acquire(void)
    587{
    588	WARN_ON_ONCE(1);
    589	return NULL;
    590}
    591
    592noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
    593{
    594	if (!p)
    595		return;
    596
    597	refcount_dec(&p->cnt);
    598}
    599
    600noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
    601{
    602}
    603
    604noinline void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
    605{
    606	WARN_ON_ONCE(1);
    607}
    608
    609noinline struct prog_test_ref_kfunc *
    610bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
    611{
    612	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);
    613
    614	if (!p)
    615		return NULL;
    616	refcount_inc(&p->cnt);
    617	return p;
    618}
    619
    620struct prog_test_pass1 {
    621	int x0;
    622	struct {
    623		int x1;
    624		struct {
    625			int x2;
    626			struct {
    627				int x3;
    628			};
    629		};
    630	};
    631};
    632
    633struct prog_test_pass2 {
    634	int len;
    635	short arr1[4];
    636	struct {
    637		char arr2[4];
    638		unsigned long arr3[8];
    639	} x;
    640};
    641
    642struct prog_test_fail1 {
    643	void *p;
    644	int x;
    645};
    646
    647struct prog_test_fail2 {
    648	int x8;
    649	struct prog_test_pass1 x;
    650};
    651
    652struct prog_test_fail3 {
    653	int len;
    654	char arr1[2];
    655	char arr2[];
    656};
    657
    658noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
    659{
    660}
    661
    662noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
    663{
    664}
    665
    666noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
    667{
    668}
    669
    670noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
    671{
    672}
    673
    674noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
    675{
    676}
    677
    678noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
    679{
    680}
    681
    682noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
    683{
    684}
    685
    686noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
    687{
    688}
    689
    690noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
    691{
    692}
    693
    694__diag_pop();
    695
    696ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
    697
    698BTF_SET_START(test_sk_check_kfunc_ids)
    699BTF_ID(func, bpf_kfunc_call_test1)
    700BTF_ID(func, bpf_kfunc_call_test2)
    701BTF_ID(func, bpf_kfunc_call_test3)
    702BTF_ID(func, bpf_kfunc_call_test_acquire)
    703BTF_ID(func, bpf_kfunc_call_memb_acquire)
    704BTF_ID(func, bpf_kfunc_call_test_release)
    705BTF_ID(func, bpf_kfunc_call_memb_release)
    706BTF_ID(func, bpf_kfunc_call_memb1_release)
    707BTF_ID(func, bpf_kfunc_call_test_kptr_get)
    708BTF_ID(func, bpf_kfunc_call_test_pass_ctx)
    709BTF_ID(func, bpf_kfunc_call_test_pass1)
    710BTF_ID(func, bpf_kfunc_call_test_pass2)
    711BTF_ID(func, bpf_kfunc_call_test_fail1)
    712BTF_ID(func, bpf_kfunc_call_test_fail2)
    713BTF_ID(func, bpf_kfunc_call_test_fail3)
    714BTF_ID(func, bpf_kfunc_call_test_mem_len_pass1)
    715BTF_ID(func, bpf_kfunc_call_test_mem_len_fail1)
    716BTF_ID(func, bpf_kfunc_call_test_mem_len_fail2)
    717BTF_SET_END(test_sk_check_kfunc_ids)
    718
    719BTF_SET_START(test_sk_acquire_kfunc_ids)
    720BTF_ID(func, bpf_kfunc_call_test_acquire)
    721BTF_ID(func, bpf_kfunc_call_memb_acquire)
    722BTF_ID(func, bpf_kfunc_call_test_kptr_get)
    723BTF_SET_END(test_sk_acquire_kfunc_ids)
    724
    725BTF_SET_START(test_sk_release_kfunc_ids)
    726BTF_ID(func, bpf_kfunc_call_test_release)
    727BTF_ID(func, bpf_kfunc_call_memb_release)
    728BTF_ID(func, bpf_kfunc_call_memb1_release)
    729BTF_SET_END(test_sk_release_kfunc_ids)
    730
    731BTF_SET_START(test_sk_ret_null_kfunc_ids)
    732BTF_ID(func, bpf_kfunc_call_test_acquire)
    733BTF_ID(func, bpf_kfunc_call_memb_acquire)
    734BTF_ID(func, bpf_kfunc_call_test_kptr_get)
    735BTF_SET_END(test_sk_ret_null_kfunc_ids)
    736
    737BTF_SET_START(test_sk_kptr_acquire_kfunc_ids)
    738BTF_ID(func, bpf_kfunc_call_test_kptr_get)
    739BTF_SET_END(test_sk_kptr_acquire_kfunc_ids)
    740
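        /* Allocate a zeroed buffer with the requested head- and tailroom and
         * copy the user-supplied test data in after the headroom.
         */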
    741static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
    742			   u32 size, u32 headroom, u32 tailroom)
    743{
    744	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
    745	void *data;
    746
    747	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
    748		return ERR_PTR(-EINVAL);
    749
    750	if (user_size > size)
    751		return ERR_PTR(-EMSGSIZE);
    752
    753	data = kzalloc(size + headroom + tailroom, GFP_USER);
    754	if (!data)
    755		return ERR_PTR(-ENOMEM);
    756
    757	if (copy_from_user(data + headroom, data_in, user_size)) {
    758		kfree(data);
    759		return ERR_PTR(-EFAULT);
    760	}
    761
    762	return data;
    763}
    764
    765int bpf_prog_test_run_tracing(struct bpf_prog *prog,
    766			      const union bpf_attr *kattr,
    767			      union bpf_attr __user *uattr)
    768{
    769	struct bpf_fentry_test_t arg = {};
    770	u16 side_effect = 0, ret = 0;
    771	int b = 2, err = -EFAULT;
    772	u32 retval = 0;
    773
    774	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
    775		return -EINVAL;
    776
    777	switch (prog->expected_attach_type) {
    778	case BPF_TRACE_FENTRY:
    779	case BPF_TRACE_FEXIT:
    780		if (bpf_fentry_test1(1) != 2 ||
    781		    bpf_fentry_test2(2, 3) != 5 ||
    782		    bpf_fentry_test3(4, 5, 6) != 15 ||
    783		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
    784		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
    785		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
    786		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
    787		    bpf_fentry_test8(&arg) != 0)
    788			goto out;
    789		break;
    790	case BPF_MODIFY_RETURN:
    791		ret = bpf_modify_return_test(1, &b);
    792		if (b != 2)
    793			side_effect = 1;
    794		break;
    795	default:
    796		goto out;
    797	}
    798
    799	retval = ((u32)side_effect << 16) | ret;
    800	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
    801		goto out;
    802
    803	err = 0;
    804out:
    805	trace_bpf_test_finish(&err);
    806	return err;
    807}
    808
    809struct bpf_raw_tp_test_run_info {
    810	struct bpf_prog *prog;
    811	void *ctx;
    812	u32 retval;
    813};
    814
    815static void
    816__bpf_prog_test_run_raw_tp(void *data)
    817{
    818	struct bpf_raw_tp_test_run_info *info = data;
    819
    820	rcu_read_lock();
    821	info->retval = bpf_prog_run(info->prog, info->ctx);
    822	rcu_read_unlock();
    823}
    824
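        /* Run a raw tracepoint program once, optionally on a specific CPU
         * when BPF_F_TEST_RUN_ON_CPU is set, and copy its return value back
         * to userspace.
         */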
    825int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
    826			     const union bpf_attr *kattr,
    827			     union bpf_attr __user *uattr)
    828{
    829	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
    830	__u32 ctx_size_in = kattr->test.ctx_size_in;
    831	struct bpf_raw_tp_test_run_info info;
    832	int cpu = kattr->test.cpu, err = 0;
    833	int current_cpu;
    834
    835	/* doesn't support data_in/out, ctx_out, duration, or repeat */
    836	if (kattr->test.data_in || kattr->test.data_out ||
    837	    kattr->test.ctx_out || kattr->test.duration ||
    838	    kattr->test.repeat || kattr->test.batch_size)
    839		return -EINVAL;
    840
    841	if (ctx_size_in < prog->aux->max_ctx_offset ||
    842	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
    843		return -EINVAL;
    844
    845	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
    846		return -EINVAL;
    847
    848	if (ctx_size_in) {
    849		info.ctx = memdup_user(ctx_in, ctx_size_in);
    850		if (IS_ERR(info.ctx))
    851			return PTR_ERR(info.ctx);
    852	} else {
    853		info.ctx = NULL;
    854	}
    855
    856	info.prog = prog;
    857
    858	current_cpu = get_cpu();
    859	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
    860	    cpu == current_cpu) {
    861		__bpf_prog_test_run_raw_tp(&info);
    862	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
    863		/* smp_call_function_single() also checks cpu_online()
    864		 * after csd_lock(). However, since cpu is from user
    865		 * space, let's do an extra quick check to filter out
     866		 * an invalid value before smp_call_function_single().
    867		 */
    868		err = -ENXIO;
    869	} else {
    870		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
    871					       &info, 1);
    872	}
    873	put_cpu();
    874
    875	if (!err &&
    876	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
    877		err = -EFAULT;
    878
    879	kfree(info.ctx);
    880	return err;
    881}
    882
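        /* Copy the optional user-supplied context into a zeroed kernel
         * buffer, rejecting trailing non-zero bytes beyond the supported
         * size.
         */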
    883static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
    884{
    885	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
    886	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
    887	u32 size = kattr->test.ctx_size_in;
    888	void *data;
    889	int err;
    890
    891	if (!data_in && !data_out)
    892		return NULL;
    893
    894	data = kzalloc(max_size, GFP_USER);
    895	if (!data)
    896		return ERR_PTR(-ENOMEM);
    897
    898	if (data_in) {
    899		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
    900		if (err) {
    901			kfree(data);
    902			return ERR_PTR(err);
    903		}
    904
    905		size = min_t(u32, max_size, size);
    906		if (copy_from_user(data, data_in, size)) {
    907			kfree(data);
    908			return ERR_PTR(-EFAULT);
    909		}
    910	}
    911	return data;
    912}
    913
    914static int bpf_ctx_finish(const union bpf_attr *kattr,
    915			  union bpf_attr __user *uattr, const void *data,
    916			  u32 size)
    917{
    918	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
    919	int err = -EFAULT;
    920	u32 copy_size = size;
    921
    922	if (!data || !data_out)
    923		return 0;
    924
    925	if (copy_size > kattr->test.ctx_size_out) {
    926		copy_size = kattr->test.ctx_size_out;
    927		err = -ENOSPC;
    928	}
    929
    930	if (copy_to_user(data_out, data, copy_size))
    931		goto out;
    932	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
    933		goto out;
    934	if (err != -ENOSPC)
    935		err = 0;
    936out:
    937	return err;
    938}
    939
    940/**
    941 * range_is_zero - test whether buffer is initialized
    942 * @buf: buffer to check
    943 * @from: check from this position
    944 * @to: check up until (excluding) this position
    945 *
     946 * This function returns true if there is no non-zero byte
     947 * in buf in the range [from, to), i.e. the whole range is zero.
    948 */
    949static inline bool range_is_zero(void *buf, size_t from, size_t to)
    950{
    951	return !memchr_inv((u8 *)buf + from, 0, to - from);
    952}
    953
    954static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
    955{
    956	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
    957
    958	if (!__skb)
    959		return 0;
    960
    961	/* make sure the fields we don't use are zeroed */
    962	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
    963		return -EINVAL;
    964
    965	/* mark is allowed */
    966
    967	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
    968			   offsetof(struct __sk_buff, priority)))
    969		return -EINVAL;
    970
    971	/* priority is allowed */
    972	/* ingress_ifindex is allowed */
    973	/* ifindex is allowed */
    974
    975	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
    976			   offsetof(struct __sk_buff, cb)))
    977		return -EINVAL;
    978
    979	/* cb is allowed */
    980
    981	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
    982			   offsetof(struct __sk_buff, tstamp)))
    983		return -EINVAL;
    984
    985	/* tstamp is allowed */
    986	/* wire_len is allowed */
    987	/* gso_segs is allowed */
    988
    989	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
    990			   offsetof(struct __sk_buff, gso_size)))
    991		return -EINVAL;
    992
    993	/* gso_size is allowed */
    994
    995	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
    996			   offsetof(struct __sk_buff, hwtstamp)))
    997		return -EINVAL;
    998
    999	/* hwtstamp is allowed */
   1000
   1001	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
   1002			   sizeof(struct __sk_buff)))
   1003		return -EINVAL;
   1004
   1005	skb->mark = __skb->mark;
   1006	skb->priority = __skb->priority;
   1007	skb->skb_iif = __skb->ingress_ifindex;
   1008	skb->tstamp = __skb->tstamp;
   1009	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);
   1010
   1011	if (__skb->wire_len == 0) {
   1012		cb->pkt_len = skb->len;
   1013	} else {
   1014		if (__skb->wire_len < skb->len ||
   1015		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
   1016			return -EINVAL;
   1017		cb->pkt_len = __skb->wire_len;
   1018	}
   1019
   1020	if (__skb->gso_segs > GSO_MAX_SEGS)
   1021		return -EINVAL;
   1022	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
   1023	skb_shinfo(skb)->gso_size = __skb->gso_size;
   1024	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;
   1025
   1026	return 0;
   1027}
   1028
   1029static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
   1030{
   1031	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
   1032
   1033	if (!__skb)
   1034		return;
   1035
   1036	__skb->mark = skb->mark;
   1037	__skb->priority = skb->priority;
   1038	__skb->ingress_ifindex = skb->skb_iif;
   1039	__skb->ifindex = skb->dev->ifindex;
   1040	__skb->tstamp = skb->tstamp;
   1041	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
   1042	__skb->wire_len = cb->pkt_len;
   1043	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
   1044	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
   1045}
   1046
   1047static struct proto bpf_dummy_proto = {
   1048	.name   = "bpf_dummy",
   1049	.owner  = THIS_MODULE,
   1050	.obj_size = sizeof(struct sock),
   1051};
   1052
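        /* BPF_PROG_RUN for skb-based program types: build an skb around the
         * user data on a dummy socket, run the program, then copy the
         * (possibly modified) packet and __sk_buff context back to userspace.
         */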
   1053int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
   1054			  union bpf_attr __user *uattr)
   1055{
   1056	bool is_l2 = false, is_direct_pkt_access = false;
   1057	struct net *net = current->nsproxy->net_ns;
   1058	struct net_device *dev = net->loopback_dev;
   1059	u32 size = kattr->test.data_size_in;
   1060	u32 repeat = kattr->test.repeat;
   1061	struct __sk_buff *ctx = NULL;
   1062	u32 retval, duration;
   1063	int hh_len = ETH_HLEN;
   1064	struct sk_buff *skb;
   1065	struct sock *sk;
   1066	void *data;
   1067	int ret;
   1068
   1069	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
   1070		return -EINVAL;
   1071
   1072	data = bpf_test_init(kattr, kattr->test.data_size_in,
   1073			     size, NET_SKB_PAD + NET_IP_ALIGN,
   1074			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
   1075	if (IS_ERR(data))
   1076		return PTR_ERR(data);
   1077
   1078	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
   1079	if (IS_ERR(ctx)) {
   1080		kfree(data);
   1081		return PTR_ERR(ctx);
   1082	}
   1083
   1084	switch (prog->type) {
   1085	case BPF_PROG_TYPE_SCHED_CLS:
   1086	case BPF_PROG_TYPE_SCHED_ACT:
   1087		is_l2 = true;
   1088		fallthrough;
   1089	case BPF_PROG_TYPE_LWT_IN:
   1090	case BPF_PROG_TYPE_LWT_OUT:
   1091	case BPF_PROG_TYPE_LWT_XMIT:
   1092		is_direct_pkt_access = true;
   1093		break;
   1094	default:
   1095		break;
   1096	}
   1097
   1098	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
   1099	if (!sk) {
   1100		kfree(data);
   1101		kfree(ctx);
   1102		return -ENOMEM;
   1103	}
   1104	sock_init_data(NULL, sk);
   1105
   1106	skb = build_skb(data, 0);
   1107	if (!skb) {
   1108		kfree(data);
   1109		kfree(ctx);
   1110		sk_free(sk);
   1111		return -ENOMEM;
   1112	}
   1113	skb->sk = sk;
   1114
   1115	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
   1116	__skb_put(skb, size);
   1117	if (ctx && ctx->ifindex > 1) {
   1118		dev = dev_get_by_index(net, ctx->ifindex);
   1119		if (!dev) {
   1120			ret = -ENODEV;
   1121			goto out;
   1122		}
   1123	}
   1124	skb->protocol = eth_type_trans(skb, dev);
   1125	skb_reset_network_header(skb);
   1126
   1127	switch (skb->protocol) {
   1128	case htons(ETH_P_IP):
   1129		sk->sk_family = AF_INET;
   1130		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
   1131			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
   1132			sk->sk_daddr = ip_hdr(skb)->daddr;
   1133		}
   1134		break;
   1135#if IS_ENABLED(CONFIG_IPV6)
   1136	case htons(ETH_P_IPV6):
   1137		sk->sk_family = AF_INET6;
   1138		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
   1139			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
   1140			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
   1141		}
   1142		break;
   1143#endif
   1144	default:
   1145		break;
   1146	}
   1147
   1148	if (is_l2)
   1149		__skb_push(skb, hh_len);
   1150	if (is_direct_pkt_access)
   1151		bpf_compute_data_pointers(skb);
   1152	ret = convert___skb_to_skb(skb, ctx);
   1153	if (ret)
   1154		goto out;
   1155	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
   1156	if (ret)
   1157		goto out;
   1158	if (!is_l2) {
   1159		if (skb_headroom(skb) < hh_len) {
   1160			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
   1161
   1162			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
   1163				ret = -ENOMEM;
   1164				goto out;
   1165			}
   1166		}
   1167		memset(__skb_push(skb, hh_len), 0, hh_len);
   1168	}
   1169	convert_skb_to___skb(skb, ctx);
   1170
   1171	size = skb->len;
   1172	/* bpf program can never convert linear skb to non-linear */
   1173	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
   1174		size = skb_headlen(skb);
   1175	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
   1176			      duration);
   1177	if (!ret)
   1178		ret = bpf_ctx_finish(kattr, uattr, ctx,
   1179				     sizeof(struct __sk_buff));
   1180out:
   1181	if (dev && dev != net->loopback_dev)
   1182		dev_put(dev);
   1183	kfree_skb(skb);
   1184	sk_free(sk);
   1185	kfree(ctx);
   1186	return ret;
   1187}
   1188
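        /* Apply a user-supplied xdp_md context to the xdp_buff: resolve the
         * ingress device and RX queue (taking a device reference) and adjust
         * the data pointer for the requested metadata length.
         */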
   1189static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
   1190{
   1191	unsigned int ingress_ifindex, rx_queue_index;
   1192	struct netdev_rx_queue *rxqueue;
   1193	struct net_device *device;
   1194
   1195	if (!xdp_md)
   1196		return 0;
   1197
   1198	if (xdp_md->egress_ifindex != 0)
   1199		return -EINVAL;
   1200
   1201	ingress_ifindex = xdp_md->ingress_ifindex;
   1202	rx_queue_index = xdp_md->rx_queue_index;
   1203
   1204	if (!ingress_ifindex && rx_queue_index)
   1205		return -EINVAL;
   1206
   1207	if (ingress_ifindex) {
   1208		device = dev_get_by_index(current->nsproxy->net_ns,
   1209					  ingress_ifindex);
   1210		if (!device)
   1211			return -ENODEV;
   1212
   1213		if (rx_queue_index >= device->real_num_rx_queues)
   1214			goto free_dev;
   1215
   1216		rxqueue = __netif_get_rx_queue(device, rx_queue_index);
   1217
   1218		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
   1219			goto free_dev;
   1220
   1221		xdp->rxq = &rxqueue->xdp_rxq;
   1222		/* The device is now tracked in the xdp->rxq for later
   1223		 * dev_put()
   1224		 */
   1225	}
   1226
   1227	xdp->data = xdp->data_meta + xdp_md->data;
   1228	return 0;
   1229
   1230free_dev:
   1231	dev_put(device);
   1232	return -EINVAL;
   1233}
   1234
   1235static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
   1236{
   1237	if (!xdp_md)
   1238		return;
   1239
   1240	xdp_md->data = xdp->data - xdp->data_meta;
   1241	xdp_md->data_end = xdp->data_end - xdp->data_meta;
   1242
   1243	if (xdp_md->ingress_ifindex)
   1244		dev_put(xdp->rxq->dev);
   1245}
   1246
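        /* BPF_PROG_RUN for XDP programs: build an xdp_buff (adding frags for
         * oversized input), then either run the program in place or, with
         * BPF_F_TEST_XDP_LIVE_FRAMES, inject the resulting frames via the
         * page_pool-backed batch machinery above.
         */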
   1247int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
   1248			  union bpf_attr __user *uattr)
   1249{
   1250	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
   1251	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
   1252	u32 batch_size = kattr->test.batch_size;
   1253	u32 retval = 0, duration, max_data_sz;
   1254	u32 size = kattr->test.data_size_in;
   1255	u32 headroom = XDP_PACKET_HEADROOM;
   1256	u32 repeat = kattr->test.repeat;
   1257	struct netdev_rx_queue *rxqueue;
   1258	struct skb_shared_info *sinfo;
   1259	struct xdp_buff xdp = {};
   1260	int i, ret = -EINVAL;
   1261	struct xdp_md *ctx;
   1262	void *data;
   1263
   1264	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
   1265	    prog->expected_attach_type == BPF_XDP_CPUMAP)
   1266		return -EINVAL;
   1267
   1268	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
   1269		return -EINVAL;
   1270
   1271	if (do_live) {
   1272		if (!batch_size)
   1273			batch_size = NAPI_POLL_WEIGHT;
   1274		else if (batch_size > TEST_XDP_MAX_BATCH)
   1275			return -E2BIG;
   1276
   1277		headroom += sizeof(struct xdp_page_head);
   1278	} else if (batch_size) {
   1279		return -EINVAL;
   1280	}
   1281
   1282	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
   1283	if (IS_ERR(ctx))
   1284		return PTR_ERR(ctx);
   1285
   1286	if (ctx) {
   1287		/* There can't be user provided data before the meta data */
   1288		if (ctx->data_meta || ctx->data_end != size ||
   1289		    ctx->data > ctx->data_end ||
   1290		    unlikely(xdp_metalen_invalid(ctx->data)) ||
   1291		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
   1292			goto free_ctx;
   1293		/* Meta data is allocated from the headroom */
   1294		headroom -= ctx->data;
   1295	}
   1296
   1297	max_data_sz = 4096 - headroom - tailroom;
   1298	if (size > max_data_sz) {
   1299		/* disallow live data mode for jumbo frames */
   1300		if (do_live)
   1301			goto free_ctx;
   1302		size = max_data_sz;
   1303	}
   1304
   1305	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
   1306	if (IS_ERR(data)) {
   1307		ret = PTR_ERR(data);
   1308		goto free_ctx;
   1309	}
   1310
   1311	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
   1312	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
   1313	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
   1314	xdp_prepare_buff(&xdp, data, headroom, size, true);
   1315	sinfo = xdp_get_shared_info_from_buff(&xdp);
   1316
   1317	ret = xdp_convert_md_to_buff(ctx, &xdp);
   1318	if (ret)
   1319		goto free_data;
   1320
   1321	if (unlikely(kattr->test.data_size_in > size)) {
   1322		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
   1323
   1324		while (size < kattr->test.data_size_in) {
   1325			struct page *page;
   1326			skb_frag_t *frag;
   1327			u32 data_len;
   1328
   1329			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
   1330				ret = -ENOMEM;
   1331				goto out;
   1332			}
   1333
   1334			page = alloc_page(GFP_KERNEL);
   1335			if (!page) {
   1336				ret = -ENOMEM;
   1337				goto out;
   1338			}
   1339
   1340			frag = &sinfo->frags[sinfo->nr_frags++];
   1341			__skb_frag_set_page(frag, page);
   1342
   1343			data_len = min_t(u32, kattr->test.data_size_in - size,
   1344					 PAGE_SIZE);
   1345			skb_frag_size_set(frag, data_len);
   1346
   1347			if (copy_from_user(page_address(page), data_in + size,
   1348					   data_len)) {
   1349				ret = -EFAULT;
   1350				goto out;
   1351			}
   1352			sinfo->xdp_frags_size += data_len;
   1353			size += data_len;
   1354		}
   1355		xdp_buff_set_frags_flag(&xdp);
   1356	}
   1357
   1358	if (repeat > 1)
   1359		bpf_prog_change_xdp(NULL, prog);
   1360
   1361	if (do_live)
   1362		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
   1363	else
   1364		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
   1365	/* We convert the xdp_buff back to an xdp_md before checking the return
   1366	 * code so the reference count of any held netdevice will be decremented
   1367	 * even if the test run failed.
   1368	 */
   1369	xdp_convert_buff_to_md(&xdp, ctx);
   1370	if (ret)
   1371		goto out;
   1372
   1373	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
   1374	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
   1375			      retval, duration);
   1376	if (!ret)
   1377		ret = bpf_ctx_finish(kattr, uattr, ctx,
   1378				     sizeof(struct xdp_md));
   1379
   1380out:
   1381	if (repeat > 1)
   1382		bpf_prog_change_xdp(prog, NULL);
   1383free_data:
   1384	for (i = 0; i < sinfo->nr_frags; i++)
   1385		__free_page(skb_frag_page(&sinfo->frags[i]));
   1386	kfree(data);
   1387free_ctx:
   1388	kfree(ctx);
   1389	return ret;
   1390}
   1391
   1392static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
   1393{
   1394	/* make sure the fields we don't use are zeroed */
   1395	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
   1396		return -EINVAL;
   1397
   1398	/* flags is allowed */
   1399
   1400	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
   1401			   sizeof(struct bpf_flow_keys)))
   1402		return -EINVAL;
   1403
   1404	return 0;
   1405}
   1406
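        /* BPF_PROG_RUN for flow dissector programs: dissect the supplied
         * packet 'repeat' times and return the resulting flow keys.
         */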
   1407int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
   1408				     const union bpf_attr *kattr,
   1409				     union bpf_attr __user *uattr)
   1410{
   1411	struct bpf_test_timer t = { NO_PREEMPT };
   1412	u32 size = kattr->test.data_size_in;
   1413	struct bpf_flow_dissector ctx = {};
   1414	u32 repeat = kattr->test.repeat;
   1415	struct bpf_flow_keys *user_ctx;
   1416	struct bpf_flow_keys flow_keys;
   1417	const struct ethhdr *eth;
   1418	unsigned int flags = 0;
   1419	u32 retval, duration;
   1420	void *data;
   1421	int ret;
   1422
   1423	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
   1424		return -EINVAL;
   1425
   1426	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
   1427		return -EINVAL;
   1428
   1429	if (size < ETH_HLEN)
   1430		return -EINVAL;
   1431
   1432	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
   1433	if (IS_ERR(data))
   1434		return PTR_ERR(data);
   1435
   1436	eth = (struct ethhdr *)data;
   1437
   1438	if (!repeat)
   1439		repeat = 1;
   1440
   1441	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
   1442	if (IS_ERR(user_ctx)) {
   1443		kfree(data);
   1444		return PTR_ERR(user_ctx);
   1445	}
   1446	if (user_ctx) {
   1447		ret = verify_user_bpf_flow_keys(user_ctx);
   1448		if (ret)
   1449			goto out;
   1450		flags = user_ctx->flags;
   1451	}
   1452
   1453	ctx.flow_keys = &flow_keys;
   1454	ctx.data = data;
   1455	ctx.data_end = (__u8 *)data + size;
   1456
   1457	bpf_test_timer_enter(&t);
   1458	do {
   1459		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
   1460					  size, flags);
   1461	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
   1462	bpf_test_timer_leave(&t);
   1463
   1464	if (ret < 0)
   1465		goto out;
   1466
   1467	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
   1468			      sizeof(flow_keys), retval, duration);
   1469	if (!ret)
   1470		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
   1471				     sizeof(struct bpf_flow_keys));
   1472
   1473out:
   1474	kfree(user_ctx);
   1475	kfree(data);
   1476	return ret;
   1477}
   1478
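        /* BPF_PROG_RUN for sk_lookup programs: build a lookup context from
         * the user-supplied bpf_sk_lookup and report the cookie of any
         * selected socket.
         */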
   1479int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
   1480				union bpf_attr __user *uattr)
   1481{
   1482	struct bpf_test_timer t = { NO_PREEMPT };
   1483	struct bpf_prog_array *progs = NULL;
   1484	struct bpf_sk_lookup_kern ctx = {};
   1485	u32 repeat = kattr->test.repeat;
   1486	struct bpf_sk_lookup *user_ctx;
   1487	u32 retval, duration;
   1488	int ret = -EINVAL;
   1489
   1490	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
   1491		return -EINVAL;
   1492
   1493	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
   1494		return -EINVAL;
   1495
   1496	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
   1497	    kattr->test.data_size_out)
   1498		return -EINVAL;
   1499
   1500	if (!repeat)
   1501		repeat = 1;
   1502
   1503	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
   1504	if (IS_ERR(user_ctx))
   1505		return PTR_ERR(user_ctx);
   1506
   1507	if (!user_ctx)
   1508		return -EINVAL;
   1509
   1510	if (user_ctx->sk)
   1511		goto out;
   1512
   1513	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
   1514		goto out;
   1515
   1516	if (user_ctx->local_port > U16_MAX) {
   1517		ret = -ERANGE;
   1518		goto out;
   1519	}
   1520
   1521	ctx.family = (u16)user_ctx->family;
   1522	ctx.protocol = (u16)user_ctx->protocol;
   1523	ctx.dport = (u16)user_ctx->local_port;
   1524	ctx.sport = user_ctx->remote_port;
   1525
   1526	switch (ctx.family) {
   1527	case AF_INET:
   1528		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
   1529		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
   1530		break;
   1531
   1532#if IS_ENABLED(CONFIG_IPV6)
   1533	case AF_INET6:
   1534		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
   1535		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
   1536		break;
   1537#endif
   1538
   1539	default:
   1540		ret = -EAFNOSUPPORT;
   1541		goto out;
   1542	}
   1543
   1544	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
   1545	if (!progs) {
   1546		ret = -ENOMEM;
   1547		goto out;
   1548	}
   1549
   1550	progs->items[0].prog = prog;
   1551
   1552	bpf_test_timer_enter(&t);
   1553	do {
   1554		ctx.selected_sk = NULL;
   1555		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
   1556	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
   1557	bpf_test_timer_leave(&t);
   1558
   1559	if (ret < 0)
   1560		goto out;
   1561
   1562	user_ctx->cookie = 0;
   1563	if (ctx.selected_sk) {
   1564		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
   1565			ret = -EOPNOTSUPP;
   1566			goto out;
   1567		}
   1568
   1569		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
   1570	}
   1571
   1572	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
   1573	if (!ret)
   1574		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));
   1575
   1576out:
   1577	bpf_prog_array_free(progs);
   1578	kfree(user_ctx);
   1579	return ret;
   1580}
   1581
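        /* BPF_PROG_RUN for syscall programs: run the program once under
         * rcu_read_lock_trace() and copy the (possibly modified) context back
         * to userspace.
         */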
   1582int bpf_prog_test_run_syscall(struct bpf_prog *prog,
   1583			      const union bpf_attr *kattr,
   1584			      union bpf_attr __user *uattr)
   1585{
   1586	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
   1587	__u32 ctx_size_in = kattr->test.ctx_size_in;
   1588	void *ctx = NULL;
   1589	u32 retval;
   1590	int err = 0;
   1591
    1592	/* doesn't support data_in/out, ctx_out, duration, repeat, or flags */
   1593	if (kattr->test.data_in || kattr->test.data_out ||
   1594	    kattr->test.ctx_out || kattr->test.duration ||
   1595	    kattr->test.repeat || kattr->test.flags ||
   1596	    kattr->test.batch_size)
   1597		return -EINVAL;
   1598
   1599	if (ctx_size_in < prog->aux->max_ctx_offset ||
   1600	    ctx_size_in > U16_MAX)
   1601		return -EINVAL;
   1602
   1603	if (ctx_size_in) {
   1604		ctx = memdup_user(ctx_in, ctx_size_in);
   1605		if (IS_ERR(ctx))
   1606			return PTR_ERR(ctx);
   1607	}
   1608
   1609	rcu_read_lock_trace();
   1610	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
   1611	rcu_read_unlock_trace();
   1612
   1613	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
   1614		err = -EFAULT;
   1615		goto out;
   1616	}
   1617	if (ctx_size_in)
   1618		if (copy_to_user(ctx_in, ctx, ctx_size_in))
   1619			err = -EFAULT;
   1620out:
   1621	kfree(ctx);
   1622	return err;
   1623}
   1624
   1625static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
   1626	.owner        = THIS_MODULE,
   1627	.check_set        = &test_sk_check_kfunc_ids,
   1628	.acquire_set      = &test_sk_acquire_kfunc_ids,
   1629	.release_set      = &test_sk_release_kfunc_ids,
   1630	.ret_null_set     = &test_sk_ret_null_kfunc_ids,
   1631	.kptr_acquire_set = &test_sk_kptr_acquire_kfunc_ids
   1632};
   1633
   1634BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
   1635BTF_ID(struct, prog_test_ref_kfunc)
   1636BTF_ID(func, bpf_kfunc_call_test_release)
   1637BTF_ID(struct, prog_test_member)
   1638BTF_ID(func, bpf_kfunc_call_memb_release)
   1639
   1640static int __init bpf_prog_test_run_init(void)
   1641{
   1642	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
   1643		{
   1644		  .btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
   1645		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
   1646		},
   1647		{
   1648		  .btf_id	= bpf_prog_test_dtor_kfunc_ids[2],
   1649		  .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
   1650		},
   1651	};
   1652	int ret;
   1653
   1654	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
   1655	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
   1656						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
   1657						  THIS_MODULE);
   1658}
   1659late_initcall(bpf_prog_test_run_init);