cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

flow_dissector.c (15201B)


// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>

#include "bpf_flow.skel.h"

#ifndef IP_MF
#define IP_MF 0x2000
#endif

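/*
 * Compare a dissected bpf_flow_keys against the expected value and, on
 * mismatch, dump every field as got/expected pairs.
 */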
#define CHECK_FLOW_KEYS(desc, got, expected)				\
	_CHECK(memcmp(&got, &expected, sizeof(got)) != 0,		\
	      desc,							\
	      topts.duration,						\
	      "nhoff=%u/%u "						\
	      "thoff=%u/%u "						\
	      "addr_proto=0x%x/0x%x "					\
	      "is_frag=%u/%u "						\
	      "is_first_frag=%u/%u "					\
	      "is_encap=%u/%u "						\
	      "ip_proto=0x%x/0x%x "					\
	      "n_proto=0x%x/0x%x "					\
	      "flow_label=0x%x/0x%x "					\
	      "sport=%u/%u "						\
	      "dport=%u/%u\n",						\
	      got.nhoff, expected.nhoff,				\
	      got.thoff, expected.thoff,				\
	      got.addr_proto, expected.addr_proto,			\
	      got.is_frag, expected.is_frag,				\
	      got.is_first_frag, expected.is_first_frag,		\
	      got.is_encap, expected.is_encap,				\
	      got.ip_proto, expected.ip_proto,				\
	      got.n_proto, expected.n_proto,				\
	      got.flow_label, expected.flow_label,			\
	      got.sport, expected.sport,				\
	      got.dport, expected.dport)

struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipip_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct iphdr iph_inner;
	struct tcphdr tcp;
} __packed;

struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_frag_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct frag_hdr {
		__u8 nexthdr;
		__u8 reserved;
		__be16 frag_off;
		__be32 identification;
	} ipf;
	struct tcphdr tcp;
} __packed;

struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

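/*
 * A test case pairs a synthetic packet with the bpf_flow_keys the dissector
 * is expected to produce for it, plus optional BPF_FLOW_DISSECTOR_F_* flags.
 */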
struct test {
	const char *name;
	union {
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipip_pkt ipip;
		struct ipv6_pkt ipv6;
		struct ipv6_frag_pkt ipv6_frag;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;
	__u32 flags;
};

#define VLAN_HLEN	4

static __u32 duration;
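/* Packet templates and the flow keys each one is expected to produce. */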
struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipv4-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
	},
	{
		.name = "ipv4-no-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
		},
	},
	{
		.name = "ipv6-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
	},
	{
		.name = "ipv6-no-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
		},
	},
	{
		.name = "ipv6-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
	},
	{
		.name = "ipv6-no-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
	},
	{
		.name = "ipip-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr) +
				sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipip-no-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_IPIP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
	},
};

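/*
 * Create a TAP device with IFF_NAPI | IFF_NAPI_FRAGS so that packets written
 * to it are handled on the kernel receive path that calls eth_get_headlen()
 * (the skb-less dissector hook, see the comment in test_flow_dissector()).
 */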
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);
		return -1;
	}

	return fd;
}

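/* Write a single raw packet to the tap device. */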
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};
	return writev(fd, iov, ARRAY_SIZE(iov));
}

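/* Bring the interface up via SIOCGIFFLAGS/SIOCSIFFLAGS. */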
static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}

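/*
 * Populate the tail-call program array with the flow_dissector_%d programs
 * from the skeleton, one program per map slot.
 */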
static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
{
	int i, err, map_fd, prog_fd;
	struct bpf_program *prog;
	char prog_name[32];

	map_fd = bpf_map__fd(prog_array);
	if (map_fd < 0)
		return -1;

	for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
		snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);

		prog = bpf_object__find_program_by_name(obj, prog_name);
		if (!prog)
			return -1;

		prog_fd = bpf_program__fd(prog);
		if (prog_fd < 0)
			return -1;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (err)
			return -1;
	}
	return 0;
}

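/*
 * Transmit each eligible test packet through the tap device and compare the
 * flow keys that the BPF program exported into the 'keys' map, indexed by
 * (sport << 16 | dport).
 */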
static void run_tests_skb_less(int tap_fd, struct bpf_map *keys)
{
	int i, err, keys_fd;

	keys_fd = bpf_map__fd(keys);
	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
		return;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		/* Keep in sync with 'flags' from eth_get_headlen. */
		__u32 eth_get_headlen_flags =
			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
		LIBBPF_OPTS(bpf_test_run_opts, topts);
		struct bpf_flow_keys flow_keys = {};
		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
			    tests[i].keys.dport;

		/* For skb-less case we can't pass input flags; run
		 * only the tests that have a matching set of flags.
		 */

		if (tests[i].flags != eth_get_headlen_flags)
			continue;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		ASSERT_OK(err, "bpf_map_lookup_elem");

		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);

		err = bpf_map_delete_elem(keys_fd, &key);
		ASSERT_OK(err, "bpf_map_delete_elem");
	}
}

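/*
 * Attach the dissector program directly with bpf_prog_attach(), run the
 * skb-less tests, then detach it again.
 */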
static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
{
	int err, prog_fd;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
		return;

	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno))
		return;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
}

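/*
 * Same as above, but attach the dissector to the current network namespace
 * through a BPF link.
 */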
static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
{
	struct bpf_link *link;
	int err, net_fd;

	net_fd = open("/proc/self/ns/net", O_RDONLY);
	if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno))
		return;

	link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
	if (!ASSERT_OK_PTR(link, "attach_netns"))
		goto out_close;

	run_tests_skb_less(tap_fd, skel->maps.last_dissection);

	err = bpf_link__destroy(link);
	CHECK(err, "bpf_link__destroy", "err %d\n", err);
out_close:
	close(net_fd);
}

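/*
 * Main entry point: load the skeleton, run every test case through
 * bpf_prog_test_run_opts(), then repeat the applicable cases over a tap
 * device to cover the skb-less dissector path.
 */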
void test_flow_dissector(void)
{
	int i, err, prog_fd, keys_fd = -1, tap_fd;
	struct bpf_flow *skel;

	skel = bpf_flow__open_and_load();
	if (CHECK(!skel, "skel", "failed to open/load skeleton\n"))
		return;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd))
		goto out_destroy_skel;
	keys_fd = bpf_map__fd(skel->maps.last_dissection);
	if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd))
		goto out_destroy_skel;
	err = init_prog_array(skel->obj, skel->maps.jmp_table);
	if (CHECK(err, "init_prog_array", "err %d\n", err))
		goto out_destroy_skel;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		LIBBPF_OPTS(bpf_test_run_opts, topts,
			.data_in = &tests[i].pkt,
			.data_size_in = sizeof(tests[i].pkt),
			.data_out = &flow_keys,
		);
		static struct bpf_flow_keys ctx = {};

		if (tests[i].flags) {
			topts.ctx_in = &ctx;
			topts.ctx_size_in = sizeof(ctx);
			ctx.flags = tests[i].flags;
		}

		err = bpf_prog_test_run_opts(prog_fd, &topts);
		ASSERT_OK(err, "test_run");
		ASSERT_EQ(topts.retval, 1, "test_run retval");
		ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
			  "test_run data_size_out");
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	/* Do the same tests but for skb-less flow dissector.
	 * We use a known path in the net/tun driver that calls
	 * eth_get_headlen and we manually export bpf_flow_keys
	 * via BPF map in this case.
	 */

	tap_fd = create_tap("tap0");
	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
	err = ifup("tap0");
	CHECK(err, "ifup", "err %d errno %d\n", err, errno);

	/* Test direct prog attachment */
	test_skb_less_prog_attach(skel, tap_fd);
	/* Test indirect prog attachment via link */
	test_skb_less_link_create(skel, tap_fd);

	close(tap_fd);
out_destroy_skel:
	bpf_flow__destroy(skel);
}