cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

nft_flow_offload.c (13596B)


// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ip.h> /* for ipv4 options. */
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_flow_table.h>

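/* Per-rule private data of the "flow_offload" expression: the flowtable
 * that offloaded flow entries are added to.
 */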
struct nft_flow_offload {
	struct nft_flowtable	*flowtable;
};

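/* A dst with an xfrm state attached has to go through the xfrm output
 * path; everything else is transmitted via a neighbour entry.
 */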
static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
{
	if (dst_xfrm(dst))
		return FLOW_OFFLOAD_XMIT_XFRM;

	return FLOW_OFFLOAD_XMIT_NEIGH;
}

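/* Default forward path for one direction: record the opposite
 * direction's input interface plus this direction's cached dst and
 * xmit type.
 */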
static void nft_default_forward_path(struct nf_flow_route *route,
				     struct dst_entry *dst_cache,
				     enum ip_conntrack_dir dir)
{
	route->tuple[!dir].in.ifindex	= dst_cache->dev->ifindex;
	route->tuple[dir].dst		= dst_cache;
	route->tuple[dir].xmit_type	= nft_xmit_type(dst_cache);
}

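/* Direct xmit and hardware offload require a real Ethernet device:
 * non-loopback, ARPHRD_ETHER, ETH_ALEN address length and a valid MAC.
 */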
static bool nft_is_valid_ether_device(const struct net_device *dev)
{
	if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
	    dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
		return false;

	return true;
}

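/* Look up the next-hop neighbour for this direction and, once its
 * hardware address is known to be valid, walk the lower device stack
 * (bridge, VLAN, PPPoE, DSA) to build the transmit path for the flow.
 */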
static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
				     const struct dst_entry *dst_cache,
				     const struct nf_conn *ct,
				     enum ip_conntrack_dir dir, u8 *ha,
				     struct net_device_path_stack *stack)
{
	const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
	struct net_device *dev = dst_cache->dev;
	struct neighbour *n;
	u8 nud_state;

	if (!nft_is_valid_ether_device(dev))
		goto out;

	n = dst_neigh_lookup(dst_cache, daddr);
	if (!n)
		return -1;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(ha, n->ha);
	read_unlock_bh(&n->lock);
	neigh_release(n);

	if (!(nud_state & NUD_VALID))
		return -1;

out:
	return dev_fill_forward_path(dev, ha, stack);
}

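/* Condensed result of a device path walk: input/output devices, the
 * VLAN/PPPoE encapsulation stack and the Ethernet addresses used when
 * transmitting directly.
 */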
struct nft_forward_info {
	const struct net_device *indev;
	const struct net_device *outdev;
	const struct net_device *hw_outdev;
	struct id {
		__u16	id;
		__be16	proto;
	} encap[NF_FLOW_TABLE_ENCAP_MAX];
	u8 num_encaps;
	u8 ingress_vlans;
	u8 h_source[ETH_ALEN];
	u8 h_dest[ETH_ALEN];
	enum flow_offload_xmit_type xmit_type;
};

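/* Flatten the device path stack into nft_forward_info: pick up the
 * real ingress device, collect VLAN/PPPoE tags up to the encap limit,
 * note VLANs that the bridge or hardware untags on ingress, and switch
 * to direct xmit when a bridge is crossed or the flowtable does
 * hardware offload on a valid Ethernet device.
 */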
static void nft_dev_path_info(const struct net_device_path_stack *stack,
			      struct nft_forward_info *info,
			      unsigned char *ha, struct nf_flowtable *flowtable)
{
	const struct net_device_path *path;
	int i;

	memcpy(info->h_dest, ha, ETH_ALEN);

	for (i = 0; i < stack->num_paths; i++) {
		path = &stack->path[i];
		switch (path->type) {
		case DEV_PATH_ETHERNET:
		case DEV_PATH_DSA:
		case DEV_PATH_VLAN:
		case DEV_PATH_PPPOE:
			info->indev = path->dev;
			if (is_zero_ether_addr(info->h_source))
				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

			if (path->type == DEV_PATH_ETHERNET)
				break;
			if (path->type == DEV_PATH_DSA) {
				i = stack->num_paths;
				break;
			}

			/* DEV_PATH_VLAN and DEV_PATH_PPPOE */
			if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
				info->indev = NULL;
				break;
			}
			if (!info->outdev)
				info->outdev = path->dev;
			info->encap[info->num_encaps].id = path->encap.id;
			info->encap[info->num_encaps].proto = path->encap.proto;
			info->num_encaps++;
			if (path->type == DEV_PATH_PPPOE)
				memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
			break;
		case DEV_PATH_BRIDGE:
			if (is_zero_ether_addr(info->h_source))
				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

			switch (path->bridge.vlan_mode) {
			case DEV_PATH_BR_VLAN_UNTAG_HW:
				info->ingress_vlans |= BIT(info->num_encaps - 1);
				break;
			case DEV_PATH_BR_VLAN_TAG:
				info->encap[info->num_encaps].id = path->bridge.vlan_id;
				info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
				info->num_encaps++;
				break;
			case DEV_PATH_BR_VLAN_UNTAG:
				info->num_encaps--;
				break;
			case DEV_PATH_BR_VLAN_KEEP:
				break;
			}
			info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
			break;
		default:
			info->indev = NULL;
			break;
		}
	}
	if (!info->outdev)
		info->outdev = info->indev;

	info->hw_outdev = info->indev;

	if (nf_flowtable_hw_offload(flowtable) &&
	    nft_is_valid_ether_device(info->indev))
		info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
}

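/* A flow can only be offloaded if its input device is one of the
 * flowtable's hook devices.
 */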
static bool nft_flowtable_find_dev(const struct net_device *dev,
				   struct nft_flowtable *ft)
{
	struct nft_hook *hook;
	bool found = false;

	list_for_each_entry_rcu(hook, &ft->hook_list, list) {
		if (hook->ops.dev != dev)
			continue;

		found = true;
		break;
	}

	return found;
}

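/* Refine the default neighbour path for one direction with the
 * resolved device path: fill the opposite direction's ingress ifindex
 * and encapsulation tags and, for direct xmit, this direction's
 * Ethernet header and egress interfaces.
 */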
static void nft_dev_forward_path(struct nf_flow_route *route,
				 const struct nf_conn *ct,
				 enum ip_conntrack_dir dir,
				 struct nft_flowtable *ft)
{
	const struct dst_entry *dst = route->tuple[dir].dst;
	struct net_device_path_stack stack;
	struct nft_forward_info info = {};
	unsigned char ha[ETH_ALEN];
	int i;

	if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
		nft_dev_path_info(&stack, &info, ha, &ft->data);

	if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
		return;

	route->tuple[!dir].in.ifindex = info.indev->ifindex;
	for (i = 0; i < info.num_encaps; i++) {
		route->tuple[!dir].in.encap[i].id = info.encap[i].id;
		route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
	}
	route->tuple[!dir].in.num_encaps = info.num_encaps;
	route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;

	if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
		memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
		memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
		route->tuple[dir].out.ifindex = info.outdev->ifindex;
		route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
		route->tuple[dir].xmit_type = info.xmit_type;
	}
}

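/* Build the routing information for both flow directions: route the
 * reverse tuple to obtain the other direction's dst, install the
 * default forward paths, and refine them with the real device paths
 * when both directions use neighbour xmit.
 */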
static int nft_flow_route(const struct nft_pktinfo *pkt,
			  const struct nf_conn *ct,
			  struct nf_flow_route *route,
			  enum ip_conntrack_dir dir,
			  struct nft_flowtable *ft)
{
	struct dst_entry *this_dst = skb_dst(pkt->skb);
	struct dst_entry *other_dst = NULL;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	switch (nft_pf(pkt)) {
	case NFPROTO_IPV4:
		fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
		fl.u.ip4.saddr = ct->tuplehash[!dir].tuple.src.u3.ip;
		fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
		fl.u.ip4.flowi4_iif = this_dst->dev->ifindex;
		fl.u.ip4.flowi4_tos = RT_TOS(ip_hdr(pkt->skb)->tos);
		fl.u.ip4.flowi4_mark = pkt->skb->mark;
		fl.u.ip4.flowi4_flags = FLOWI_FLAG_ANYSRC;
		break;
	case NFPROTO_IPV6:
		fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
		fl.u.ip6.saddr = ct->tuplehash[!dir].tuple.src.u3.in6;
		fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
		fl.u.ip6.flowi6_iif = this_dst->dev->ifindex;
		fl.u.ip6.flowlabel = ip6_flowinfo(ipv6_hdr(pkt->skb));
		fl.u.ip6.flowi6_mark = pkt->skb->mark;
		fl.u.ip6.flowi6_flags = FLOWI_FLAG_ANYSRC;
		break;
	}

	nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
	if (!other_dst)
		return -ENOENT;

	nft_default_forward_path(route, this_dst, dir);
	nft_default_forward_path(route, other_dst, !dir);

	if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
	    route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
		nft_dev_forward_path(route, ct, dir, ft);
		nft_dev_forward_path(route, ct, !dir, ft);
	}

	return 0;
}

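/* Flows the flowtable cannot handle: packets with an IPsec secpath and
 * IPv4 packets carrying IP options.
 */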
static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
{
	if (skb_sec_path(skb))
		return true;

	if (family == NFPROTO_IPV4) {
		const struct ip_options *opt;

		opt = &(IPCB(skb)->opt);

		if (unlikely(opt->optlen))
			return true;
	}

	return false;
}

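/* Evaluation of the flow_offload expression: for a confirmed,
 * established and helper-free connection, grab IPS_OFFLOAD, build the
 * route for both directions and add the flow to the flowtable. Any
 * failure falls through to NFT_BREAK, so the packet keeps taking the
 * normal forwarding path.
 */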
static void nft_flow_offload_eval(const struct nft_expr *expr,
				  struct nft_regs *regs,
				  const struct nft_pktinfo *pkt)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);
	struct nf_flowtable *flowtable = &priv->flowtable->data;
	struct tcphdr _tcph, *tcph = NULL;
	struct nf_flow_route route = {};
	enum ip_conntrack_info ctinfo;
	struct flow_offload *flow;
	enum ip_conntrack_dir dir;
	struct nf_conn *ct;
	int ret;

	if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
		goto out;

	ct = nf_ct_get(pkt->skb, &ctinfo);
	if (!ct)
		goto out;

	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
	case IPPROTO_TCP:
		tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt),
					  sizeof(_tcph), &_tcph);
		if (unlikely(!tcph || tcph->fin || tcph->rst ||
			     !nf_conntrack_tcp_established(ct)))
			goto out;
		break;
	case IPPROTO_UDP:
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE: {
		struct nf_conntrack_tuple *tuple;

		if (ct->status & IPS_NAT_MASK)
			goto out;
		tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
		/* No support for GRE v1 */
		if (tuple->src.u.gre.key || tuple->dst.u.gre.key)
			goto out;
		break;
	}
#endif
	default:
		goto out;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
		goto out;

	if (!nf_ct_is_confirmed(ct))
		goto out;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		goto out;

	dir = CTINFO2DIR(ctinfo);
	if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0)
		goto err_flow_route;

	flow = flow_offload_alloc(ct);
	if (!flow)
		goto err_flow_alloc;

	if (flow_offload_route_init(flow, &route) < 0)
		goto err_flow_add;

	if (tcph) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	ret = flow_offload_add(flowtable, flow);
	if (ret < 0)
		goto err_flow_add;

	dst_release(route.tuple[!dir].dst);
	return;

err_flow_add:
	flow_offload_free(flow);
err_flow_alloc:
	dst_release(route.tuple[!dir].dst);
err_flow_route:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
out:
	regs->verdict.code = NFT_BREAK;
}

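/* The expression may only be used from forward chains. */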
static int nft_flow_offload_validate(const struct nft_ctx *ctx,
				     const struct nft_expr *expr,
				     const struct nft_data **data)
{
	unsigned int hook_mask = (1 << NF_INET_FORWARD);

	return nft_chain_validate_hooks(ctx->chain, hook_mask);
}

static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
	[NFTA_FLOW_TABLE_NAME]	= { .type = NLA_STRING,
				    .len = NFT_NAME_MAXLEN - 1 },
};

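/* Bind the expression to the flowtable named by NFTA_FLOW_TABLE_NAME
 * and bump its use count for the next generation.
 */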
static int nft_flow_offload_init(const struct nft_ctx *ctx,
				 const struct nft_expr *expr,
				 const struct nlattr * const tb[])
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);
	u8 genmask = nft_genmask_next(ctx->net);
	struct nft_flowtable *flowtable;

	if (!tb[NFTA_FLOW_TABLE_NAME])
		return -EINVAL;

	flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
					 genmask);
	if (IS_ERR(flowtable))
		return PTR_ERR(flowtable);

	priv->flowtable = flowtable;
	flowtable->use++;

	return nf_ct_netns_get(ctx->net, ctx->family);
}

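/* activate/deactivate keep the flowtable use count in sync across
 * transaction commit and abort, so the flowtable cannot be removed
 * while rules still reference it.
 */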
static void nft_flow_offload_deactivate(const struct nft_ctx *ctx,
					const struct nft_expr *expr,
					enum nft_trans_phase phase)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	nf_tables_deactivate_flowtable(ctx, priv->flowtable, phase);
}

static void nft_flow_offload_activate(const struct nft_ctx *ctx,
				      const struct nft_expr *expr)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	priv->flowtable->use++;
}

static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
				     const struct nft_expr *expr)
{
	nf_ct_netns_put(ctx->net, ctx->family);
}

static int nft_flow_offload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_flow_offload_type;
static const struct nft_expr_ops nft_flow_offload_ops = {
	.type		= &nft_flow_offload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)),
	.eval		= nft_flow_offload_eval,
	.init		= nft_flow_offload_init,
	.activate	= nft_flow_offload_activate,
	.deactivate	= nft_flow_offload_deactivate,
	.destroy	= nft_flow_offload_destroy,
	.validate	= nft_flow_offload_validate,
	.dump		= nft_flow_offload_dump,
	.reduce		= NFT_REDUCE_READONLY,
};

static struct nft_expr_type nft_flow_offload_type __read_mostly = {
	.name		= "flow_offload",
	.ops		= &nft_flow_offload_ops,
	.policy		= nft_flow_offload_policy,
	.maxattr	= NFTA_FLOW_MAX,
	.owner		= THIS_MODULE,
};

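/* Flush all offloaded flows through a device when it goes down. */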
static int flow_offload_netdev_event(struct notifier_block *this,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	nf_flow_table_cleanup(dev);

	return NOTIFY_DONE;
}

static struct notifier_block flow_offload_netdev_notifier = {
	.notifier_call	= flow_offload_netdev_event,
};

static int __init nft_flow_offload_module_init(void)
{
	int err;

	err = register_netdevice_notifier(&flow_offload_netdev_notifier);
	if (err)
		goto err;

	err = nft_register_expr(&nft_flow_offload_type);
	if (err < 0)
		goto register_expr;

	return 0;

register_expr:
	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
err:
	return err;
}

static void __exit nft_flow_offload_module_exit(void)
{
	nft_unregister_expr(&nft_flow_offload_type);
	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
}

module_init(nft_flow_offload_module_init);
module_exit(nft_flow_offload_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("flow_offload");
MODULE_DESCRIPTION("nftables hardware flow offload module");
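
For context, this module implements the expression behind the nft(8) flow statement. A minimal ruleset sketch that would exercise it (interface names are placeholders; the ruleset is not part of this repository):

	table inet filter {
		flowtable f {
			hook ingress priority 0
			devices = { eth0, eth1 }
		}
		chain forward {
			type filter hook forward priority 0; policy accept;
			ip protocol { tcp, udp } flow add @f
		}
	}

Once the first packets of a connection traverse the forward chain, nft_flow_offload_eval() adds an entry to @f, and subsequent packets of that flow are forwarded from the ingress hook, bypassing the classic forwarding path.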