cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

act_tunnel_key.c (23909B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
      4 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
      5 */
      6
      7#include <linux/module.h>
      8#include <linux/init.h>
      9#include <linux/kernel.h>
     10#include <linux/skbuff.h>
     11#include <linux/rtnetlink.h>
     12#include <net/geneve.h>
     13#include <net/vxlan.h>
     14#include <net/erspan.h>
     15#include <net/netlink.h>
     16#include <net/pkt_sched.h>
     17#include <net/dst.h>
     18#include <net/pkt_cls.h>
     19
     20#include <linux/tc_act/tc_tunnel_key.h>
     21#include <net/tc_act/tc_tunnel_key.h>
     22
     23static unsigned int tunnel_key_net_id;
     24static struct tc_action_ops act_tunnel_key_ops;
     25
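/*
 * Per-packet action handler. Runs in the softirq fast path: it looks up the
 * current parameters under RCU, updates last-use and byte/packet stats, then
 * either drops the skb's dst (RELEASE) or attaches a clone of the
 * preconfigured encap metadata dst (SET). Returns the configured tc control
 * action.
 */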
     26static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
     27			  struct tcf_result *res)
     28{
     29	struct tcf_tunnel_key *t = to_tunnel_key(a);
     30	struct tcf_tunnel_key_params *params;
     31	int action;
     32
     33	params = rcu_dereference_bh(t->params);
     34
     35	tcf_lastuse_update(&t->tcf_tm);
     36	tcf_action_update_bstats(&t->common, skb);
     37	action = READ_ONCE(t->tcf_action);
     38
     39	switch (params->tcft_action) {
     40	case TCA_TUNNEL_KEY_ACT_RELEASE:
     41		skb_dst_drop(skb);
     42		break;
     43	case TCA_TUNNEL_KEY_ACT_SET:
     44		skb_dst_drop(skb);
     45		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
     46		break;
     47	default:
     48		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
     49			  params->tcft_action);
     50		break;
     51	}
     52
     53	return action;
     54}
     55
     56static const struct nla_policy
     57enc_opts_policy[TCA_TUNNEL_KEY_ENC_OPTS_MAX + 1] = {
     58	[TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC]	= {
     59		.strict_start_type = TCA_TUNNEL_KEY_ENC_OPTS_VXLAN },
     60	[TCA_TUNNEL_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
     61	[TCA_TUNNEL_KEY_ENC_OPTS_VXLAN]		= { .type = NLA_NESTED },
     62	[TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN]	= { .type = NLA_NESTED },
     63};
     64
     65static const struct nla_policy
     66geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
     67	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]	   = { .type = NLA_U16 },
     68	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]	   = { .type = NLA_U8 },
     69	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]	   = { .type = NLA_BINARY,
     70						       .len = 128 },
     71};
     72
     73static const struct nla_policy
     74vxlan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1] = {
     75	[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]	   = { .type = NLA_U32 },
     76};
     77
     78static const struct nla_policy
     79erspan_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
     80	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]	   = { .type = NLA_U8 },
     81	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]	   = { .type = NLA_U32 },
     82	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR]	   = { .type = NLA_U8 },
     83	[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]	   = { .type = NLA_U8 },
     84};
     85
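/*
 * Parse a single GENEVE option from netlink. Class, type and data are all
 * required, and the data must be at least 4 bytes long and a multiple of 4.
 * When @dst is non-NULL the option is written there as a struct geneve_opt
 * followed by its payload. Returns the encoded option length or a negative
 * errno.
 */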
     86static int
     87tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
     88			   struct netlink_ext_ack *extack)
     89{
     90	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
     91	int err, data_len, opt_len;
     92	u8 *data;
     93
     94	err = nla_parse_nested_deprecated(tb,
     95					  TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
     96					  nla, geneve_opt_policy, extack);
     97	if (err < 0)
     98		return err;
     99
    100	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
    101	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
    102	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
    103		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
    104		return -EINVAL;
    105	}
    106
    107	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
    108	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
    109	if (data_len < 4) {
    110		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
    111		return -ERANGE;
    112	}
    113	if (data_len % 4) {
    114		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
    115		return -ERANGE;
    116	}
    117
    118	opt_len = sizeof(struct geneve_opt) + data_len;
    119	if (dst) {
    120		struct geneve_opt *opt = dst;
    121
    122		WARN_ON(dst_len < opt_len);
    123
    124		opt->opt_class =
    125			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
    126		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
    127		opt->length = data_len / 4; /* length is in units of 4 bytes */
    128		opt->r1 = 0;
    129		opt->r2 = 0;
    130		opt->r3 = 0;
    131
    132		memcpy(opt + 1, data, data_len);
    133	}
    134
    135	return opt_len;
    136}
    137
    138static int
    139tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
    140			  struct netlink_ext_ack *extack)
    141{
    142	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX + 1];
    143	int err;
    144
    145	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX, nla,
    146			       vxlan_opt_policy, extack);
    147	if (err < 0)
    148		return err;
    149
    150	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]) {
    151		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
    152		return -EINVAL;
    153	}
    154
    155	if (dst) {
    156		struct vxlan_metadata *md = dst;
    157
    158		md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
    159		md->gbp &= VXLAN_GBP_MASK;
    160	}
    161
    162	return sizeof(struct vxlan_metadata);
    163}
    164
    165static int
    166tunnel_key_copy_erspan_opt(const struct nlattr *nla, void *dst, int dst_len,
    167			   struct netlink_ext_ack *extack)
    168{
    169	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX + 1];
    170	int err;
    171	u8 ver;
    172
    173	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX, nla,
    174			       erspan_opt_policy, extack);
    175	if (err < 0)
    176		return err;
    177
    178	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]) {
    179		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
    180		return -EINVAL;
    181	}
    182
    183	ver = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER]);
    184	if (ver == 1) {
    185		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX]) {
    186			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
    187			return -EINVAL;
    188		}
    189	} else if (ver == 2) {
    190		if (!tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR] ||
    191		    !tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID]) {
    192			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
    193			return -EINVAL;
    194		}
    195	} else {
    196		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
    197		return -EINVAL;
    198	}
    199
    200	if (dst) {
    201		struct erspan_metadata *md = dst;
    202
    203		md->version = ver;
    204		if (ver == 1) {
    205			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX];
    206			md->u.index = nla_get_be32(nla);
    207		} else {
    208			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR];
    209			md->u.md2.dir = nla_get_u8(nla);
    210			nla = tb[TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID];
    211			set_hwid(&md->u.md2, nla_get_u8(nla));
    212		}
    213	}
    214
    215	return sizeof(struct erspan_metadata);
    216}
    217
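/*
 * Walk the nested TCA_TUNNEL_KEY_ENC_OPTS attributes. Called twice: first
 * with @dst == NULL to compute the total options length, then again to copy
 * the options into the tunnel metadata. Mixing option types is rejected, and
 * accumulated GENEVE options may not exceed IP_TUNNEL_OPTS_MAX. Returns the
 * total length or a negative errno.
 */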
    218static int tunnel_key_copy_opts(const struct nlattr *nla, u8 *dst,
    219				int dst_len, struct netlink_ext_ack *extack)
    220{
    221	int err, rem, opt_len, len = nla_len(nla), opts_len = 0, type = 0;
    222	const struct nlattr *attr, *head = nla_data(nla);
    223
    224	err = nla_validate_deprecated(head, len, TCA_TUNNEL_KEY_ENC_OPTS_MAX,
    225				      enc_opts_policy, extack);
    226	if (err)
    227		return err;
    228
    229	nla_for_each_attr(attr, head, len, rem) {
    230		switch (nla_type(attr)) {
    231		case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
    232			if (type && type != TUNNEL_GENEVE_OPT) {
    233				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
    234				return -EINVAL;
    235			}
    236			opt_len = tunnel_key_copy_geneve_opt(attr, dst,
    237							     dst_len, extack);
    238			if (opt_len < 0)
    239				return opt_len;
    240			opts_len += opt_len;
    241			if (opts_len > IP_TUNNEL_OPTS_MAX) {
    242				NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
    243				return -EINVAL;
    244			}
    245			if (dst) {
    246				dst_len -= opt_len;
    247				dst += opt_len;
    248			}
    249			type = TUNNEL_GENEVE_OPT;
    250			break;
    251		case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
    252			if (type) {
    253				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
    254				return -EINVAL;
    255			}
    256			opt_len = tunnel_key_copy_vxlan_opt(attr, dst,
    257							    dst_len, extack);
    258			if (opt_len < 0)
    259				return opt_len;
    260			opts_len += opt_len;
    261			type = TUNNEL_VXLAN_OPT;
    262			break;
    263		case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
    264			if (type) {
    265				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
    266				return -EINVAL;
    267			}
    268			opt_len = tunnel_key_copy_erspan_opt(attr, dst,
    269							     dst_len, extack);
    270			if (opt_len < 0)
    271				return opt_len;
    272			opts_len += opt_len;
    273			type = TUNNEL_ERSPAN_OPT;
    274			break;
    275		}
    276	}
    277
    278	if (!opts_len) {
    279		NL_SET_ERR_MSG(extack, "Empty list of tunnel options");
    280		return -EINVAL;
    281	}
    282
    283	if (rem > 0) {
    284		NL_SET_ERR_MSG(extack, "Trailing data after parsing tunnel key options attributes");
    285		return -EINVAL;
    286	}
    287
    288	return opts_len;
    289}
    290
    291static int tunnel_key_get_opts_len(struct nlattr *nla,
    292				   struct netlink_ext_ack *extack)
    293{
    294	return tunnel_key_copy_opts(nla, NULL, 0, extack);
    295}
    296
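/*
 * Copy the already-validated options into @info and set the matching
 * TUNNEL_*_OPT flag. Option support depends on CONFIG_INET; otherwise
 * -EAFNOSUPPORT is returned.
 */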
    297static int tunnel_key_opts_set(struct nlattr *nla, struct ip_tunnel_info *info,
    298			       int opts_len, struct netlink_ext_ack *extack)
    299{
    300	info->options_len = opts_len;
    301	switch (nla_type(nla_data(nla))) {
    302	case TCA_TUNNEL_KEY_ENC_OPTS_GENEVE:
    303#if IS_ENABLED(CONFIG_INET)
    304		info->key.tun_flags |= TUNNEL_GENEVE_OPT;
    305		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
    306					    opts_len, extack);
    307#else
    308		return -EAFNOSUPPORT;
    309#endif
    310	case TCA_TUNNEL_KEY_ENC_OPTS_VXLAN:
    311#if IS_ENABLED(CONFIG_INET)
    312		info->key.tun_flags |= TUNNEL_VXLAN_OPT;
    313		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
    314					    opts_len, extack);
    315#else
    316		return -EAFNOSUPPORT;
    317#endif
    318	case TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN:
    319#if IS_ENABLED(CONFIG_INET)
    320		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
    321		return tunnel_key_copy_opts(nla, ip_tunnel_info_opts(info),
    322					    opts_len, extack);
    323#else
    324		return -EAFNOSUPPORT;
    325#endif
    326	default:
    327		NL_SET_ERR_MSG(extack, "Cannot set tunnel options for unknown tunnel type");
    328		return -EINVAL;
    329	}
    330}
    331
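/* Netlink policy for the TCA_TUNNEL_KEY_* attributes parsed by tunnel_key_init(). */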
    332static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
    333	[TCA_TUNNEL_KEY_PARMS]	    = { .len = sizeof(struct tc_tunnel_key) },
    334	[TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
    335	[TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
    336	[TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
    337	[TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
    338	[TCA_TUNNEL_KEY_ENC_KEY_ID]   = { .type = NLA_U32 },
    339	[TCA_TUNNEL_KEY_ENC_DST_PORT] = {.type = NLA_U16},
    340	[TCA_TUNNEL_KEY_NO_CSUM]      = { .type = NLA_U8 },
    341	[TCA_TUNNEL_KEY_ENC_OPTS]     = { .type = NLA_NESTED },
    342	[TCA_TUNNEL_KEY_ENC_TOS]      = { .type = NLA_U8 },
    343	[TCA_TUNNEL_KEY_ENC_TTL]      = { .type = NLA_U8 },
    344};
    345
    346static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
    347{
    348	if (!p)
    349		return;
    350	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
    351		dst_release(&p->tcft_enc_metadata->dst);
    352
    353	kfree_rcu(p, rcu);
    354}
    355
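/*
 * Control-path setup/replace. Parses the netlink configuration, allocates a
 * metadata dst describing the encapsulation for the SET action (including
 * any tunnel options), then creates the action or replaces its parameters.
 * The new parameters are published with rcu_replace_pointer() under
 * tcf_lock; the old ones are freed after an RCU grace period.
 */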
    356static int tunnel_key_init(struct net *net, struct nlattr *nla,
    357			   struct nlattr *est, struct tc_action **a,
    358			   struct tcf_proto *tp, u32 act_flags,
    359			   struct netlink_ext_ack *extack)
    360{
    361	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
    362	bool bind = act_flags & TCA_ACT_FLAGS_BIND;
    363	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
    364	struct tcf_tunnel_key_params *params_new;
    365	struct metadata_dst *metadata = NULL;
    366	struct tcf_chain *goto_ch = NULL;
    367	struct tc_tunnel_key *parm;
    368	struct tcf_tunnel_key *t;
    369	bool exists = false;
    370	__be16 dst_port = 0;
    371	__be64 key_id = 0;
    372	int opts_len = 0;
    373	__be16 flags = 0;
    374	u8 tos, ttl;
    375	int ret = 0;
    376	u32 index;
    377	int err;
    378
    379	if (!nla) {
    380		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
    381		return -EINVAL;
    382	}
    383
    384	err = nla_parse_nested_deprecated(tb, TCA_TUNNEL_KEY_MAX, nla,
    385					  tunnel_key_policy, extack);
    386	if (err < 0) {
    387		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
    388		return err;
    389	}
    390
    391	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
    392		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
    393		return -EINVAL;
    394	}
    395
    396	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
    397	index = parm->index;
    398	err = tcf_idr_check_alloc(tn, &index, a, bind);
    399	if (err < 0)
    400		return err;
    401	exists = err;
    402	if (exists && bind)
    403		return 0;
    404
    405	switch (parm->t_action) {
    406	case TCA_TUNNEL_KEY_ACT_RELEASE:
    407		break;
    408	case TCA_TUNNEL_KEY_ACT_SET:
    409		if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
    410			__be32 key32;
    411
    412			key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
    413			key_id = key32_to_tunnel_id(key32);
    414			flags = TUNNEL_KEY;
    415		}
    416
    417		flags |= TUNNEL_CSUM;
    418		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
    419		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
    420			flags &= ~TUNNEL_CSUM;
    421
    422		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
    423			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
    424
    425		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
    426			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
    427							   extack);
    428			if (opts_len < 0) {
    429				ret = opts_len;
    430				goto err_out;
    431			}
    432		}
    433
    434		tos = 0;
    435		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
    436			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
    437		ttl = 0;
    438		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
    439			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);
    440
    441		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
    442		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
    443			__be32 saddr;
    444			__be32 daddr;
    445
    446			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
    447			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
    448
    449			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
    450						    dst_port, flags,
    451						    key_id, opts_len);
    452		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
    453			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
    454			struct in6_addr saddr;
    455			struct in6_addr daddr;
    456
    457			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
    458			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
    459
    460			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
    461						      0, flags,
    462						      key_id, opts_len);
    463		} else {
    464			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
    465			ret = -EINVAL;
    466			goto err_out;
    467		}
    468
    469		if (!metadata) {
    470			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
    471			ret = -ENOMEM;
    472			goto err_out;
    473		}
    474
    475#ifdef CONFIG_DST_CACHE
    476		ret = dst_cache_init(&metadata->u.tun_info.dst_cache, GFP_KERNEL);
    477		if (ret)
    478			goto release_tun_meta;
    479#endif
    480
    481		if (opts_len) {
    482			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
    483						  &metadata->u.tun_info,
    484						  opts_len, extack);
    485			if (ret < 0)
    486				goto release_tun_meta;
    487		}
    488
    489		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
    490		break;
    491	default:
    492		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
    493		ret = -EINVAL;
    494		goto err_out;
    495	}
    496
    497	if (!exists) {
    498		ret = tcf_idr_create_from_flags(tn, index, est, a,
    499						&act_tunnel_key_ops, bind,
    500						act_flags);
    501		if (ret) {
    502			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
    503			goto release_tun_meta;
    504		}
    505
    506		ret = ACT_P_CREATED;
    507	} else if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
    508		NL_SET_ERR_MSG(extack, "TC IDR already exists");
    509		ret = -EEXIST;
    510		goto release_tun_meta;
    511	}
    512
    513	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
    514	if (err < 0) {
    515		ret = err;
    516		exists = true;
    517		goto release_tun_meta;
    518	}
    519	t = to_tunnel_key(*a);
    520
    521	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
    522	if (unlikely(!params_new)) {
    523		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
    524		ret = -ENOMEM;
    525		exists = true;
    526		goto put_chain;
    527	}
    528	params_new->tcft_action = parm->t_action;
    529	params_new->tcft_enc_metadata = metadata;
    530
    531	spin_lock_bh(&t->tcf_lock);
    532	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
    533	params_new = rcu_replace_pointer(t->params, params_new,
    534					 lockdep_is_held(&t->tcf_lock));
    535	spin_unlock_bh(&t->tcf_lock);
    536	tunnel_key_release_params(params_new);
    537	if (goto_ch)
    538		tcf_chain_put_by_act(goto_ch);
    539
    540	return ret;
    541
    542put_chain:
    543	if (goto_ch)
    544		tcf_chain_put_by_act(goto_ch);
    545
    546release_tun_meta:
    547	if (metadata)
    548		dst_release(&metadata->dst);
    549
    550err_out:
    551	if (exists)
    552		tcf_idr_release(*a, bind);
    553	else
    554		tcf_idr_cleanup(tn, index);
    555	return ret;
    556}
    557
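/*
 * Action teardown: drop the reference on the encap metadata dst (SET only)
 * and free the parameters after an RCU grace period.
 */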
    558static void tunnel_key_release(struct tc_action *a)
    559{
    560	struct tcf_tunnel_key *t = to_tunnel_key(a);
    561	struct tcf_tunnel_key_params *params;
    562
    563	params = rcu_dereference_protected(t->params, 1);
    564	tunnel_key_release_params(params);
    565}
    566
    567static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
    568				       const struct ip_tunnel_info *info)
    569{
    570	int len = info->options_len;
    571	u8 *src = (u8 *)(info + 1);
    572	struct nlattr *start;
    573
    574	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
    575	if (!start)
    576		return -EMSGSIZE;
    577
    578	while (len > 0) {
    579		struct geneve_opt *opt = (struct geneve_opt *)src;
    580
    581		if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
    582				 opt->opt_class) ||
    583		    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
    584			       opt->type) ||
    585		    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
    586			    opt->length * 4, opt + 1)) {
    587			nla_nest_cancel(skb, start);
    588			return -EMSGSIZE;
    589		}
    590
    591		len -= sizeof(struct geneve_opt) + opt->length * 4;
    592		src += sizeof(struct geneve_opt) + opt->length * 4;
    593	}
    594
    595	nla_nest_end(skb, start);
    596	return 0;
    597}
    598
    599static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
    600				      const struct ip_tunnel_info *info)
    601{
    602	struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
    603	struct nlattr *start;
    604
    605	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
    606	if (!start)
    607		return -EMSGSIZE;
    608
    609	if (nla_put_u32(skb, TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) {
    610		nla_nest_cancel(skb, start);
    611		return -EMSGSIZE;
    612	}
    613
    614	nla_nest_end(skb, start);
    615	return 0;
    616}
    617
    618static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
    619				       const struct ip_tunnel_info *info)
    620{
    621	struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
    622	struct nlattr *start;
    623
    624	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
    625	if (!start)
    626		return -EMSGSIZE;
    627
    628	if (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, md->version))
    629		goto err;
    630
    631	if (md->version == 1 &&
    632	    nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
    633		goto err;
    634
    635	if (md->version == 2 &&
    636	    (nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR,
    637			md->u.md2.dir) ||
    638	     nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID,
    639			get_hwid(&md->u.md2))))
    640		goto err;
    641
    642	nla_nest_end(skb, start);
    643	return 0;
    644err:
    645	nla_nest_cancel(skb, start);
    646	return -EMSGSIZE;
    647}
    648
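/*
 * Dump tunnel options back to userspace, dispatching to the GENEVE, VXLAN or
 * ERSPAN helper above based on the tunnel flags. An empty options block is
 * simply omitted.
 */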
    649static int tunnel_key_opts_dump(struct sk_buff *skb,
    650				const struct ip_tunnel_info *info)
    651{
    652	struct nlattr *start;
    653	int err = -EINVAL;
    654
    655	if (!info->options_len)
    656		return 0;
    657
    658	start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS);
    659	if (!start)
    660		return -EMSGSIZE;
    661
    662	if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
    663		err = tunnel_key_geneve_opts_dump(skb, info);
    664		if (err)
    665			goto err_out;
    666	} else if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
    667		err = tunnel_key_vxlan_opts_dump(skb, info);
    668		if (err)
    669			goto err_out;
    670	} else if (info->key.tun_flags & TUNNEL_ERSPAN_OPT) {
    671		err = tunnel_key_erspan_opts_dump(skb, info);
    672		if (err)
    673			goto err_out;
    674	} else {
    675err_out:
    676		nla_nest_cancel(skb, start);
    677		return err;
    678	}
    679
    680	nla_nest_end(skb, start);
    681	return 0;
    682}
    683
    684static int tunnel_key_dump_addresses(struct sk_buff *skb,
    685				     const struct ip_tunnel_info *info)
    686{
    687	unsigned short family = ip_tunnel_info_af(info);
    688
    689	if (family == AF_INET) {
    690		__be32 saddr = info->key.u.ipv4.src;
    691		__be32 daddr = info->key.u.ipv4.dst;
    692
    693		if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
    694		    !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
    695			return 0;
    696	}
    697
    698	if (family == AF_INET6) {
    699		const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
    700		const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
    701
    702		if (!nla_put_in6_addr(skb,
    703				      TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
    704		    !nla_put_in6_addr(skb,
    705				      TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
    706			return 0;
    707	}
    708
    709	return -EINVAL;
    710}
    711
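/*
 * Dump the action configuration. Runs under tcf_lock so the parameters
 * cannot change while the netlink message is built; for SET actions the
 * encap addresses, key id, dst port, checksum flag, options, TOS and TTL are
 * included when set.
 */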
    712static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
    713			   int bind, int ref)
    714{
    715	unsigned char *b = skb_tail_pointer(skb);
    716	struct tcf_tunnel_key *t = to_tunnel_key(a);
    717	struct tcf_tunnel_key_params *params;
    718	struct tc_tunnel_key opt = {
    719		.index    = t->tcf_index,
    720		.refcnt   = refcount_read(&t->tcf_refcnt) - ref,
    721		.bindcnt  = atomic_read(&t->tcf_bindcnt) - bind,
    722	};
    723	struct tcf_t tm;
    724
    725	spin_lock_bh(&t->tcf_lock);
    726	params = rcu_dereference_protected(t->params,
    727					   lockdep_is_held(&t->tcf_lock));
    728	opt.action   = t->tcf_action;
    729	opt.t_action = params->tcft_action;
    730
    731	if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
    732		goto nla_put_failure;
    733
    734	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
    735		struct ip_tunnel_info *info =
    736			&params->tcft_enc_metadata->u.tun_info;
    737		struct ip_tunnel_key *key = &info->key;
    738		__be32 key_id = tunnel_id_to_key32(key->tun_id);
    739
    740		if (((key->tun_flags & TUNNEL_KEY) &&
    741		     nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
    742		    tunnel_key_dump_addresses(skb,
    743					      &params->tcft_enc_metadata->u.tun_info) ||
    744		    (key->tp_dst &&
    745		      nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
    746				   key->tp_dst)) ||
    747		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
    748			       !(key->tun_flags & TUNNEL_CSUM)) ||
    749		    tunnel_key_opts_dump(skb, info))
    750			goto nla_put_failure;
    751
    752		if (key->tos && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TOS, key->tos))
    753			goto nla_put_failure;
    754
    755		if (key->ttl && nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_TTL, key->ttl))
    756			goto nla_put_failure;
    757	}
    758
    759	tcf_tm_dump(&tm, &t->tcf_tm);
    760	if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
    761			  &tm, TCA_TUNNEL_KEY_PAD))
    762		goto nla_put_failure;
    763	spin_unlock_bh(&t->tcf_lock);
    764
    765	return skb->len;
    766
    767nla_put_failure:
    768	spin_unlock_bh(&t->tcf_lock);
    769	nlmsg_trim(skb, b);
    770	return -1;
    771}
    772
    773static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
    774			     struct netlink_callback *cb, int type,
    775			     const struct tc_action_ops *ops,
    776			     struct netlink_ext_ack *extack)
    777{
    778	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
    779
    780	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
    781}
    782
    783static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
    784{
    785	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
    786
    787	return tcf_idr_search(tn, a, index);
    788}
    789
    790static void tcf_tunnel_encap_put_tunnel(void *priv)
    791{
    792	struct ip_tunnel_info *tunnel = priv;
    793
    794	kfree(tunnel);
    795}
    796
    797static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
    798				       const struct tc_action *act)
    799{
    800	entry->tunnel = tcf_tunnel_info_copy(act);
    801	if (!entry->tunnel)
    802		return -ENOMEM;
    803	entry->destructor = tcf_tunnel_encap_put_tunnel;
    804	entry->destructor_priv = entry->tunnel;
    805	return 0;
    806}
    807
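/*
 * Hardware offload hook: translate the software action into a
 * FLOW_ACTION_TUNNEL_ENCAP entry (with a copy of the tunnel info) or a
 * FLOW_ACTION_TUNNEL_DECAP entry for drivers.
 */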
    808static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
    809					    void *entry_data,
    810					    u32 *index_inc,
    811					    bool bind,
    812					    struct netlink_ext_ack *extack)
    813{
    814	int err;
    815
    816	if (bind) {
    817		struct flow_action_entry *entry = entry_data;
    818
    819		if (is_tcf_tunnel_set(act)) {
    820			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
    821			err = tcf_tunnel_encap_get_tunnel(entry, act);
    822			if (err)
    823				return err;
    824		} else if (is_tcf_tunnel_release(act)) {
    825			entry->id = FLOW_ACTION_TUNNEL_DECAP;
    826		} else {
    827			NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel key mode offload");
    828			return -EOPNOTSUPP;
    829		}
    830		*index_inc = 1;
    831	} else {
    832		struct flow_offload_action *fl_action = entry_data;
    833
    834		if (is_tcf_tunnel_set(act))
    835			fl_action->id = FLOW_ACTION_TUNNEL_ENCAP;
    836		else if (is_tcf_tunnel_release(act))
    837			fl_action->id = FLOW_ACTION_TUNNEL_DECAP;
    838		else
    839			return -EOPNOTSUPP;
    840	}
    841
    842	return 0;
    843}
    844
    845static struct tc_action_ops act_tunnel_key_ops = {
    846	.kind		=	"tunnel_key",
    847	.id		=	TCA_ID_TUNNEL_KEY,
    848	.owner		=	THIS_MODULE,
    849	.act		=	tunnel_key_act,
    850	.dump		=	tunnel_key_dump,
    851	.init		=	tunnel_key_init,
    852	.cleanup	=	tunnel_key_release,
    853	.walk		=	tunnel_key_walker,
    854	.lookup		=	tunnel_key_search,
    855	.offload_act_setup =	tcf_tunnel_key_offload_act_setup,
    856	.size		=	sizeof(struct tcf_tunnel_key),
    857};
    858
    859static __net_init int tunnel_key_init_net(struct net *net)
    860{
    861	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
    862
    863	return tc_action_net_init(net, tn, &act_tunnel_key_ops);
    864}
    865
    866static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
    867{
    868	tc_action_net_exit(net_list, tunnel_key_net_id);
    869}
    870
    871static struct pernet_operations tunnel_key_net_ops = {
    872	.init = tunnel_key_init_net,
    873	.exit_batch = tunnel_key_exit_net,
    874	.id   = &tunnel_key_net_id,
    875	.size = sizeof(struct tc_action_net),
    876};
    877
    878static int __init tunnel_key_init_module(void)
    879{
    880	return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
    881}
    882
    883static void __exit tunnel_key_cleanup_module(void)
    884{
    885	tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
    886}
    887
    888module_init(tunnel_key_init_module);
    889module_exit(tunnel_key_cleanup_module);
    890
    891MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
    892MODULE_DESCRIPTION("ip tunnel manipulation actions");
    893MODULE_LICENSE("GPL v2");