cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux

lwt_bpf.c (15129B)


// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
 */

#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>

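/* BPF programs as lightweight tunnels (LWT): programs of type
 * BPF_PROG_TYPE_LWT_{IN,OUT,XMIT} are attached to routes and run on
 * packets at input, output and transmit time. They are typically
 * attached with iproute2, for example (assuming a compiled object
 * prog.o with a section named lwt_xmit):
 *
 *   ip route add 192.0.2.0/24 encap bpf xmit obj prog.o section lwt_xmit dev eth0
 */
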
struct bpf_lwt_prog {
	struct bpf_prog *prog;
	char *name;
};

struct bpf_lwt {
	struct bpf_lwt_prog in;
	struct bpf_lwt_prog out;
	struct bpf_lwt_prog xmit;
	int family;
};

#define MAX_PROG_NAME 256

static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
	return (struct bpf_lwt *)lwt->data;
}

#define NO_REDIRECT false
#define CAN_REDIRECT true

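/* Run a single LWT BPF program on @skb and translate its return code:
 * BPF_OK and BPF_LWT_REROUTE pass through unchanged, BPF_REDIRECT is
 * honoured only when @can_redirect is set (i.e. on the xmit hook), and
 * BPF_DROP or an unknown value frees the skb and returns a negative
 * errno.
 */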
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
		       struct dst_entry *dst, bool can_redirect)
{
	int ret;

	/* Migration disable and BH disable are needed to protect per-cpu
	 * redirect_info between BPF prog and skb_do_redirect().
	 */
	migrate_disable();
	local_bh_disable();
	bpf_compute_data_pointers(skb);
	ret = bpf_prog_run_save_cb(lwt->prog, skb);

	switch (ret) {
	case BPF_OK:
	case BPF_LWT_REROUTE:
		break;

	case BPF_REDIRECT:
		if (unlikely(!can_redirect)) {
			pr_warn_once("Illegal redirect return code in prog %s\n",
				     lwt->name ? : "<unknown>");
			ret = BPF_OK;
		} else {
			skb_reset_mac_header(skb);
			ret = skb_do_redirect(skb);
			if (ret == 0)
				ret = BPF_REDIRECT;
		}
		break;

	case BPF_DROP:
		kfree_skb(skb);
		ret = -EPERM;
		break;

	default:
		pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
		kfree_skb(skb);
		ret = -EINVAL;
		break;
	}

	local_bh_enable();
	migrate_enable();

	return ret;
}

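/* The program returned BPF_LWT_REROUTE on the input hook: drop the dst
 * chosen by the original route lookup, redo the lookup based on the
 * (possibly rewritten) packet headers, then continue with dst_input().
 */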
static int bpf_lwt_input_reroute(struct sk_buff *skb)
{
	int err = -EINVAL;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct net_device *dev = skb_dst(skb)->dev;
		struct iphdr *iph = ip_hdr(skb);

		dev_hold(dev);
		skb_dst_drop(skb);
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		dev_put(dev);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		skb_dst_drop(skb);
		err = ipv6_stub->ipv6_route_input(skb);
	} else {
		err = -EAFNOSUPPORT;
	}

	if (err)
		goto err;
	return dst_input(skb);

err:
	kfree_skb(skb);
	return err;
}

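/* lwtunnel input hook: run the "in" program, handle a reroute request,
 * then hand the skb on to the input handler of the original dst.
 */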
static int bpf_input(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->in.prog) {
		ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
		if (ret == BPF_LWT_REROUTE)
			return bpf_lwt_input_reroute(skb);
	}

	if (unlikely(!dst->lwtstate->orig_input)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_input(skb);
}

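/* lwtunnel output hook: run the "out" program, then continue with the
 * output handler of the original dst. Redirects are not allowed here.
 */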
static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;
	int ret;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->out.prog) {
		ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
		if (ret < 0)
			return ret;
	}

	if (unlikely(!dst->lwtstate->orig_output)) {
		pr_warn_once("orig_output not set on dst for prog %s\n",
			     bpf->out.name);
		kfree_skb(skb);
		return -EINVAL;
	}

	return dst->lwtstate->orig_output(net, sk, skb);
}

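/* Ensure there is enough headroom for the L2 header the device will
 * prepend; expand the skb head if the BPF program consumed headroom.
 */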
static int xmit_check_hhlen(struct sk_buff *skb, int hh_len)
{
	if (skb_headroom(skb) < hh_len) {
		int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

		if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	return 0;
}

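/* BPF_LWT_REROUTE on the xmit hook: the program pushed a new IP header
 * (e.g. via bpf_lwt_push_encap()), so perform a full route lookup for
 * the new outer header and transmit the skb via dst_output().
 */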
static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
{
	struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
	int oif = l3mdev ? l3mdev->ifindex : 0;
	struct dst_entry *dst = NULL;
	int err = -EAFNOSUPPORT;
	struct sock *sk;
	struct net *net;
	bool ipv4;

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv4 = false;
	else
		goto err;

	sk = sk_to_full_sk(skb->sk);
	if (sk) {
		if (sk->sk_bound_dev_if)
			oif = sk->sk_bound_dev_if;
		net = sock_net(sk);
	} else {
		net = dev_net(skb_dst(skb)->dev);
	}

	if (ipv4) {
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {};
		struct rtable *rt;

		fl4.flowi4_oif = oif;
		fl4.flowi4_mark = skb->mark;
		fl4.flowi4_uid = sock_net_uid(net, sk);
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
		fl4.flowi4_proto = iph->protocol;
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;

		rt = ip_route_output_key(net, &fl4);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			goto err;
		}
		dst = &rt->dst;
	} else {
		struct ipv6hdr *iph6 = ipv6_hdr(skb);
		struct flowi6 fl6 = {};

		fl6.flowi6_oif = oif;
		fl6.flowi6_mark = skb->mark;
		fl6.flowi6_uid = sock_net_uid(net, sk);
		fl6.flowlabel = ip6_flowinfo(iph6);
		fl6.flowi6_proto = iph6->nexthdr;
		fl6.daddr = iph6->daddr;
		fl6.saddr = iph6->saddr;

		dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto err;
		}
	}
	if (unlikely(dst->error)) {
		err = dst->error;
		dst_release(dst);
		goto err;
	}

	/* Although skb header was reserved in bpf_lwt_push_ip_encap(), it
	 * was done for the previous dst, so we are doing it here again, in
	 * case the new dst needs much more space. The call below is a noop
	 * if there is enough header space in skb.
	 */
	err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
	if (unlikely(err))
		goto err;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(err))
		return err;

	/* ip[6]_finish_output2 understands LWTUNNEL_XMIT_DONE */
	return LWTUNNEL_XMIT_DONE;

err:
	kfree_skb(skb);
	return err;
}

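/* lwtunnel xmit hook: the only hook on which BPF_REDIRECT is permitted.
 * BPF_OK continues regular transmission (after sanity checks),
 * BPF_REDIRECT means the program already sent the skb elsewhere, and
 * BPF_LWT_REROUTE triggers a fresh route lookup for a new IP header.
 */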
static int bpf_xmit(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct bpf_lwt *bpf;

	bpf = bpf_lwt_lwtunnel(dst->lwtstate);
	if (bpf->xmit.prog) {
		int hh_len = dst->dev->hard_header_len;
		__be16 proto = skb->protocol;
		int ret;

		ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
		switch (ret) {
		case BPF_OK:
			/* If the header changed, e.g. via bpf_lwt_push_encap,
			 * BPF_LWT_REROUTE below should have been used if the
			 * protocol was also changed.
			 */
			if (skb->protocol != proto) {
				kfree_skb(skb);
				return -EINVAL;
			}
			/* If the header was expanded, headroom might be too
			 * small for L2 header to come, expand as needed.
			 */
			ret = xmit_check_hhlen(skb, hh_len);
			if (unlikely(ret))
				return ret;

			return LWTUNNEL_XMIT_CONTINUE;
		case BPF_REDIRECT:
			return LWTUNNEL_XMIT_DONE;
		case BPF_LWT_REROUTE:
			return bpf_lwt_xmit_reroute(skb);
		default:
			return ret;
		}
	}

	return LWTUNNEL_XMIT_CONTINUE;
}

static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
	if (prog->prog)
		bpf_prog_put(prog->prog);

	kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	bpf_lwt_prog_destroy(&bpf->in);
	bpf_lwt_prog_destroy(&bpf->out);
	bpf_lwt_prog_destroy(&bpf->xmit);
}

static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
	[LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
	[LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
				.len = MAX_PROG_NAME },
};

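/* Parse one nested LWT_BPF_{IN,OUT,XMIT} attribute: duplicate the
 * user-supplied program name and take a reference on the program via
 * its file descriptor.
 */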
static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
			  enum bpf_prog_type type)
{
	struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
	struct bpf_prog *p;
	int ret;
	u32 fd;

	ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
					  bpf_prog_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
		return -EINVAL;

	prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
	if (!prog->name)
		return -ENOMEM;

	fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
	p = bpf_prog_get_type(fd, type);
	if (IS_ERR(p))
		return PTR_ERR(p);

	prog->prog = p;

	return 0;
}

static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
	[LWT_BPF_IN]		= { .type = NLA_NESTED, },
	[LWT_BPF_OUT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT]		= { .type = NLA_NESTED, },
	[LWT_BPF_XMIT_HEADROOM]	= { .type = NLA_U32 },
};

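/* Netlink build_state callback: allocate the per-route bpf_lwt state
 * and attach up to three programs (in/out/xmit) plus an optional
 * headroom reservation for the xmit hook.
 */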
static int bpf_build_state(struct net *net, struct nlattr *nla,
			   unsigned int family, const void *cfg,
			   struct lwtunnel_state **ts,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWT_BPF_MAX + 1];
	struct lwtunnel_state *newts;
	struct bpf_lwt *bpf;
	int ret;

	if (family != AF_INET && family != AF_INET6)
		return -EAFNOSUPPORT;

	ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
					  extack);
	if (ret < 0)
		return ret;

	if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
		return -EINVAL;

	newts = lwtunnel_state_alloc(sizeof(*bpf));
	if (!newts)
		return -ENOMEM;

	newts->type = LWTUNNEL_ENCAP_BPF;
	bpf = bpf_lwt_lwtunnel(newts);

	if (tb[LWT_BPF_IN]) {
		newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
				     BPF_PROG_TYPE_LWT_IN);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_OUT]) {
		newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
				     BPF_PROG_TYPE_LWT_OUT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT]) {
		newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
		ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
				     BPF_PROG_TYPE_LWT_XMIT);
		if (ret < 0)
			goto errout;
	}

	if (tb[LWT_BPF_XMIT_HEADROOM]) {
		u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

		if (headroom > LWT_BPF_MAX_HEADROOM) {
			ret = -ERANGE;
			goto errout;
		}

		newts->headroom = headroom;
	}

	bpf->family = family;
	*ts = newts;

	return 0;

errout:
	bpf_destroy_state(newts);
	kfree(newts);
	return ret;
}

static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
			     struct bpf_lwt_prog *prog)
{
	struct nlattr *nest;

	if (!prog->prog)
		return 0;

	nest = nla_nest_start_noflag(skb, attr);
	if (!nest)
		return -EMSGSIZE;

	if (prog->name &&
	    nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
		return -EMSGSIZE;

	return nla_nest_end(skb, nest);
}

static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
	struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

	if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
	    bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
		return -EMSGSIZE;

	return 0;
}

static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	int nest_len = nla_total_size(sizeof(struct nlattr)) +
		       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
		       0;

	return nest_len + /* LWT_BPF_IN */
	       nest_len + /* LWT_BPF_OUT */
	       nest_len + /* LWT_BPF_XMIT */
	       0;
}

static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
	/* FIXME:
	 * The LWT state is currently rebuilt for delete requests which
	 * results in a new bpf_prog instance. Comparing names for now.
	 */
	if (!a->name && !b->name)
		return 0;

	if (!a->name || !b->name)
		return 1;

	return strcmp(a->name, b->name);
}

static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
	struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

	return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
	       bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
	       bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}

static const struct lwtunnel_encap_ops bpf_encap_ops = {
	.build_state	= bpf_build_state,
	.destroy_state	= bpf_destroy_state,
	.input		= bpf_input,
	.output		= bpf_output,
	.xmit		= bpf_xmit,
	.fill_encap	= bpf_fill_encap_info,
	.get_encap_size = bpf_encap_nlsize,
	.cmp_encap	= bpf_encap_cmp,
	.owner		= THIS_MODULE,
};

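/* GSO handling for encapsulated packets: after an outer header has been
 * pushed, the GSO metadata must be adjusted (tunnel gso_type set,
 * gso_size reduced by the encap length) so that segmentation still
 * produces valid inner and outer headers.
 */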
static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
			   int encap_len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	gso_type |= SKB_GSO_DODGY;
	shinfo->gso_type |= gso_type;
	skb_decrease_gso_size(shinfo, encap_len);
	shinfo->gso_segs = 0;
	return 0;
}

static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
{
	int next_hdr_offset;
	void *next_hdr;
	__u8 protocol;

	/* SCTP and UDP_L4 gso need more nuanced handling than what
	 * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
	 * So at the moment only TCP GSO packets are let through.
	 */
	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		return -ENOTSUPP;

	if (ipv4) {
		protocol = ip_hdr(skb)->protocol;
		next_hdr_offset = sizeof(struct iphdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	} else {
		protocol = ipv6_hdr(skb)->nexthdr;
		next_hdr_offset = sizeof(struct ipv6hdr);
		next_hdr = skb_network_header(skb) + next_hdr_offset;
	}

	switch (protocol) {
	case IPPROTO_GRE:
		next_hdr_offset += sizeof(struct gre_base_hdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
			return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_GRE, encap_len);

	case IPPROTO_UDP:
		next_hdr_offset += sizeof(struct udphdr);
		if (next_hdr_offset > encap_len)
			return -EINVAL;

		if (((struct udphdr *)next_hdr)->check)
			return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
					       encap_len);
		return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);

	case IPPROTO_IP:
	case IPPROTO_IPV6:
		if (ipv4)
			return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
		else
			return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);

	default:
		return -EPROTONOSUPPORT;
	}
}

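/* Backend for the bpf_lwt_push_encap() helper in its IP encapsulation
 * modes: validate a caller-supplied outer IPv4/IPv6 header, push it in
 * front of the current network header, and fix up skb metadata.
 */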
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
	struct iphdr *iph;
	bool ipv4;
	int err;

	if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
		return -EINVAL;

	/* validate protocol and length */
	iph = (struct iphdr *)hdr;
	if (iph->version == 4) {
		ipv4 = true;
		if (unlikely(len < iph->ihl * 4))
			return -EINVAL;
	} else if (iph->version == 6) {
		ipv4 = false;
		if (unlikely(len < sizeof(struct ipv6hdr)))
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	if (ingress)
		err = skb_cow_head(skb, len + skb->mac_len);
	else
		err = skb_cow_head(skb,
				   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
	if (unlikely(err))
		return err;

	/* push the encap headers and fix pointers */
	skb_reset_inner_headers(skb);
	skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
	skb_set_inner_protocol(skb, skb->protocol);
	skb->encapsulation = 1;
	skb_push(skb, len);
	if (ingress)
		skb_postpush_rcsum(skb, iph, len);
	skb_reset_network_header(skb);
	memcpy(skb_network_header(skb), hdr, len);
	bpf_compute_data_pointers(skb);
	skb_clear_hash(skb);

	if (ipv4) {
		skb->protocol = htons(ETH_P_IP);
		iph = ip_hdr(skb);

		if (!iph->check)
			iph->check = ip_fast_csum((unsigned char *)iph,
						  iph->ihl);
	} else {
		skb->protocol = htons(ETH_P_IPV6);
	}

	if (skb_is_gso(skb))
		return handle_gso_encap(skb, ipv4, len);

	return 0;
}

static int __init bpf_lwt_init(void)
{
	return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)