cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

br_forward.c (7943B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
		nbp_switchdev_allowed_egress(p, skb) &&
		!br_skb_isolated(p, skb);
}

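/* Final transmit step: restore the Ethernet header that was pulled on
 * ingress and hand the frame to the egress device's queueing layer.
 */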
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	br_drop_fake_rtable(skb);

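	/* For VLAN-tagged frames with checksum offload still pending, point
	 * the network header past the VLAN tag(s) so the egress path can
	 * locate the L3 header when completing the checksum.
	 */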
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    eth_type_vlan(skb->protocol)) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	br_switchdev_frame_set_offload_fwd_mark(skb);

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_clear_tstamp(skb);
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

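/* Deliver @skb on @to's device, traversing the appropriate netfilter
 * bridge hook: NF_BR_FORWARD for bridged traffic, NF_BR_LOCAL_OUT for
 * locally originated traffic.
 */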
static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	/* Mark the skb for forwarding offload early so that br_handle_vlan()
	 * can know whether to pop the VLAN header on egress or keep it.
	 */
	nbp_switchdev_frame_mark_tx_fwd_offload(to, skb);

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	if (!local_orig) {
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}
		br_hook = NF_BR_FORWARD;
		skb_forward_csum(skb);
		net = dev_net(indev);
	} else {
		if (unlikely(netpoll_tx_running(to->br->dev))) {
			skb_push(skb, ETH_HLEN);
			if (!is_skb_forwardable(skb->dev, skb))
				kfree_skb(skb);
			else
				br_netpoll_send_skb(to, skb);
			return;
		}
		br_hook = NF_BR_LOCAL_OUT;
		net = dev_net(skb->dev);
		indev = NULL;
	}

	NF_HOOK(NFPROTO_BRIDGE, br_hook,
		net, NULL, skb, indev, skb->dev,
		br_forward_finish);
}

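/* Forward a clone of @skb so the caller keeps ownership of the original,
 * e.g. for local delivery after flooding.
 */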
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__br_forward(prev, skb, local_orig);
	return 0;
}

/**
 * br_forward - forward a packet to a specific port
 * @to: destination port
 * @skb: packet being forwarded
 * @local_rcv: packet will be received locally after forwarding
 * @local_orig: packet is locally originated
 *
 * Should be called with rcu_read_lock.
 */
void br_forward(const struct net_bridge_port *to,
		struct sk_buff *skb, bool local_rcv, bool local_orig)
{
	if (unlikely(!to))
		goto out;

	/* redirect to backup link if the destination port is down */
	if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
		struct net_bridge_port *backup_port;

		backup_port = rcu_dereference(to->backup_port);
		if (unlikely(!backup_port))
			goto out;
		to = backup_port;
	}

	if (should_deliver(to, skb)) {
		if (local_rcv)
			deliver_clone(to, skb, local_orig);
		else
			__br_forward(to, skb, local_orig);
		return;
	}

out:
	if (!local_rcv)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);

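/* Flood helper: delivery is deferred by one port so that the last eligible
 * port can consume the original skb; each newly found port sends a clone
 * to the previously remembered one.
 */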
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb, bool local_orig)
{
	u8 igmp_type = br_multicast_igmp_type(skb);
	int err;

	if (!should_deliver(p, skb))
		return prev;

	nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, local_orig);
	if (err)
		return ERR_PTR(err);
out:
	br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);

	return p;
}

/* called under rcu_read_lock */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port *p;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off, nor
		 * other traffic if flood off, except for traffic we originate
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if ((p->flags & (BR_PROXYARP_WIFI | BR_NEIGH_SUPPRESS)) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;

		prev = maybe_deliver(prev, p, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
			       const unsigned char *addr, bool local_orig)
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	const unsigned char *src = eth_hdr(skb)->h_source;

	if (!should_deliver(p, skb))
		return;

	/* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
	if (skb->dev == p->dev && ether_addr_equal(src, addr))
		return;

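	/* A full copy (not a clone) is needed because the destination MAC
	 * may be rewritten below for multicast-to-unicast delivery.
	 */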
	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return;
	}

	if (!is_broadcast_ether_addr(addr))
		memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);

	__br_forward(p, skb, local_orig);
}

/* called with rcu_read_lock */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			struct sk_buff *skb,
			struct net_bridge_mcast *brmctx,
			bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	bool allow_mode_include = true;
	struct hlist_node *rp;

	rp = br_multicast_get_first_rport_node(brmctx, skb);

	if (mdst) {
		p = rcu_dereference(mdst->ports);
		if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) &&
		    br_multicast_is_star_g(&mdst->addr))
			allow_mode_include = false;
	} else {
		p = NULL;
	}

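	/* Walk the group's port list and the router port list in parallel.
	 * Both lists are kept sorted by port pointer value, so this is a
	 * sorted-list merge that delivers at most once per port even when a
	 * port appears on both lists.
	 */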
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->key.port : NULL;
		rport = br_multicast_rport_from_node_skb(rp, skb);

		if ((unsigned long)lport > (unsigned long)rport) {
			port = lport;

			if (port->flags & BR_MULTICAST_TO_UNICAST) {
				maybe_deliver_addr(lport, skb, p->eth_addr,
						   local_orig);
				goto delivered;
			}
			if ((!allow_mode_include &&
			     p->filter_mode == MCAST_INCLUDE) ||
			    (p->flags & MDB_PG_FLAGS_BLOCKED))
				goto delivered;
		} else {
			port = rport;
		}

		prev = maybe_deliver(prev, port, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
delivered:
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
#endif