cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

macvlan.c (47768B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Copyright (c) 2007 Patrick McHardy <kaber@trash.net>
      4 *
      5 * The code this is based on carried the following copyright notice:
      6 * ---
      7 * (C) Copyright 2001-2006
      8 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com
      9 * Re-worked by Ben Greear <greearb@candelatech.com>
     10 * ---
     11 */
     12#include <linux/kernel.h>
     13#include <linux/types.h>
     14#include <linux/module.h>
     15#include <linux/init.h>
     16#include <linux/errno.h>
     17#include <linux/slab.h>
     18#include <linux/string.h>
     19#include <linux/rculist.h>
     20#include <linux/notifier.h>
     21#include <linux/netdevice.h>
     22#include <linux/etherdevice.h>
     23#include <linux/net_tstamp.h>
     24#include <linux/ethtool.h>
     25#include <linux/if_arp.h>
     26#include <linux/if_vlan.h>
     27#include <linux/if_link.h>
     28#include <linux/if_macvlan.h>
     29#include <linux/hash.h>
     30#include <linux/workqueue.h>
     31#include <net/rtnetlink.h>
     32#include <net/xfrm.h>
     33#include <linux/netpoll.h>
     34#include <linux/phy.h>
     35
     36#define MACVLAN_HASH_BITS	8
     37#define MACVLAN_HASH_SIZE	(1<<MACVLAN_HASH_BITS)
     38#define MACVLAN_DEFAULT_BC_QUEUE_LEN	1000
     39
     40#define MACVLAN_F_PASSTHRU	1
     41#define MACVLAN_F_ADDRCHANGE	2
     42
     43struct macvlan_port {
     44	struct net_device	*dev;
     45	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
     46	struct list_head	vlans;
     47	struct sk_buff_head	bc_queue;
     48	struct work_struct	bc_work;
     49	u32			bc_queue_len_used;
     50	u32			flags;
     51	int			count;
     52	struct hlist_head	vlan_source_hash[MACVLAN_HASH_SIZE];
     53	DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
     54	unsigned char           perm_addr[ETH_ALEN];
     55};
     56
     57struct macvlan_source_entry {
     58	struct hlist_node	hlist;
     59	struct macvlan_dev	*vlan;
     60	unsigned char		addr[6+2] __aligned(sizeof(u16));
     61	struct rcu_head		rcu;
     62};
     63
     64struct macvlan_skb_cb {
     65	const struct macvlan_dev *src;
     66};
     67
     68#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
     69
     70static void macvlan_port_destroy(struct net_device *dev);
     71static void update_port_bc_queue_len(struct macvlan_port *port);
     72
     73static inline bool macvlan_passthru(const struct macvlan_port *port)
     74{
     75	return port->flags & MACVLAN_F_PASSTHRU;
     76}
     77
     78static inline void macvlan_set_passthru(struct macvlan_port *port)
     79{
     80	port->flags |= MACVLAN_F_PASSTHRU;
     81}
     82
     83static inline bool macvlan_addr_change(const struct macvlan_port *port)
     84{
     85	return port->flags & MACVLAN_F_ADDRCHANGE;
     86}
     87
     88static inline void macvlan_set_addr_change(struct macvlan_port *port)
     89{
     90	port->flags |= MACVLAN_F_ADDRCHANGE;
     91}
     92
     93static inline void macvlan_clear_addr_change(struct macvlan_port *port)
     94{
     95	port->flags &= ~MACVLAN_F_ADDRCHANGE;
     96}
     97
     98/* Hash Ethernet address */
     99static u32 macvlan_eth_hash(const unsigned char *addr)
    100{
    101	u64 value = get_unaligned((u64 *)addr);
    102
    103	/* only want 6 bytes */
    104#ifdef __BIG_ENDIAN
    105	value >>= 16;
    106#else
    107	value <<= 16;
    108#endif
    109	return hash_64(value, MACVLAN_HASH_BITS);
    110}
    111
    112static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
    113{
    114	return rcu_dereference(dev->rx_handler_data);
    115}
    116
    117static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
    118{
    119	return rtnl_dereference(dev->rx_handler_data);
    120}
    121
    122static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
    123					       const unsigned char *addr)
    124{
    125	struct macvlan_dev *vlan;
    126	u32 idx = macvlan_eth_hash(addr);
    127
    128	hlist_for_each_entry_rcu(vlan, &port->vlan_hash[idx], hlist,
    129				 lockdep_rtnl_is_held()) {
    130		if (ether_addr_equal_64bits(vlan->dev->dev_addr, addr))
    131			return vlan;
    132	}
    133	return NULL;
    134}
    135
    136static struct macvlan_source_entry *macvlan_hash_lookup_source(
    137	const struct macvlan_dev *vlan,
    138	const unsigned char *addr)
    139{
    140	struct macvlan_source_entry *entry;
    141	u32 idx = macvlan_eth_hash(addr);
    142	struct hlist_head *h = &vlan->port->vlan_source_hash[idx];
    143
    144	hlist_for_each_entry_rcu(entry, h, hlist) {
    145		if (ether_addr_equal_64bits(entry->addr, addr) &&
    146		    entry->vlan == vlan)
    147			return entry;
    148	}
    149	return NULL;
    150}
    151
    152static int macvlan_hash_add_source(struct macvlan_dev *vlan,
    153				   const unsigned char *addr)
    154{
    155	struct macvlan_port *port = vlan->port;
    156	struct macvlan_source_entry *entry;
    157	struct hlist_head *h;
    158
    159	entry = macvlan_hash_lookup_source(vlan, addr);
    160	if (entry)
    161		return 0;
    162
    163	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
    164	if (!entry)
    165		return -ENOMEM;
    166
    167	ether_addr_copy(entry->addr, addr);
    168	entry->vlan = vlan;
    169	h = &port->vlan_source_hash[macvlan_eth_hash(addr)];
    170	hlist_add_head_rcu(&entry->hlist, h);
    171	vlan->macaddr_count++;
    172
    173	return 0;
    174}
    175
    176static void macvlan_hash_add(struct macvlan_dev *vlan)
    177{
    178	struct macvlan_port *port = vlan->port;
    179	const unsigned char *addr = vlan->dev->dev_addr;
    180	u32 idx = macvlan_eth_hash(addr);
    181
    182	hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[idx]);
    183}
    184
    185static void macvlan_hash_del_source(struct macvlan_source_entry *entry)
    186{
    187	hlist_del_rcu(&entry->hlist);
    188	kfree_rcu(entry, rcu);
    189}
    190
    191static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync)
    192{
    193	hlist_del_rcu(&vlan->hlist);
    194	if (sync)
    195		synchronize_rcu();
    196}
    197
    198static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
    199					const unsigned char *addr)
    200{
    201	macvlan_hash_del(vlan, true);
    202	/* Now that we are unhashed it is safe to change the device
    203	 * address without confusing packet delivery.
    204	 */
    205	eth_hw_addr_set(vlan->dev, addr);
    206	macvlan_hash_add(vlan);
    207}
    208
    209static bool macvlan_addr_busy(const struct macvlan_port *port,
    210			      const unsigned char *addr)
    211{
    212	/* Test to see if the specified address is
    213	 * currently in use by the underlying device or
    214	 * another macvlan.
    215	 */
    216	if (!macvlan_passthru(port) && !macvlan_addr_change(port) &&
    217	    ether_addr_equal_64bits(port->dev->dev_addr, addr))
    218		return true;
    219
    220	if (macvlan_hash_lookup(port, addr))
    221		return true;
    222
    223	return false;
    224}
    225
    226
    227static int macvlan_broadcast_one(struct sk_buff *skb,
    228				 const struct macvlan_dev *vlan,
    229				 const struct ethhdr *eth, bool local)
    230{
    231	struct net_device *dev = vlan->dev;
    232
    233	if (local)
    234		return __dev_forward_skb(dev, skb);
    235
    236	skb->dev = dev;
    237	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
    238		skb->pkt_type = PACKET_BROADCAST;
    239	else
    240		skb->pkt_type = PACKET_MULTICAST;
    241
    242	return 0;
    243}
    244
    245static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
    246{
    247	return (u32)(((unsigned long)vlan) >> L1_CACHE_SHIFT);
    248}
    249
    250
    251static unsigned int mc_hash(const struct macvlan_dev *vlan,
    252			    const unsigned char *addr)
    253{
    254	u32 val = __get_unaligned_cpu32(addr + 2);
    255
    256	val ^= macvlan_hash_mix(vlan);
    257	return hash_32(val, MACVLAN_MC_FILTER_BITS);
    258}
    259
    260static void macvlan_broadcast(struct sk_buff *skb,
    261			      const struct macvlan_port *port,
    262			      struct net_device *src,
    263			      enum macvlan_mode mode)
    264{
    265	const struct ethhdr *eth = eth_hdr(skb);
    266	const struct macvlan_dev *vlan;
    267	struct sk_buff *nskb;
    268	unsigned int i;
    269	int err;
    270	unsigned int hash;
    271
    272	if (skb->protocol == htons(ETH_P_PAUSE))
    273		return;
    274
    275	hash_for_each_rcu(port->vlan_hash, i, vlan, hlist) {
    276		if (vlan->dev == src || !(vlan->mode & mode))
    277			continue;
    278
    279		hash = mc_hash(vlan, eth->h_dest);
    280		if (!test_bit(hash, vlan->mc_filter))
    281			continue;
    282
    283		err = NET_RX_DROP;
    284		nskb = skb_clone(skb, GFP_ATOMIC);
    285		if (likely(nskb))
    286			err = macvlan_broadcast_one(nskb, vlan, eth,
    287					mode == MACVLAN_MODE_BRIDGE) ?:
    288			      netif_rx(nskb);
    289		macvlan_count_rx(vlan, skb->len + ETH_HLEN,
    290				 err == NET_RX_SUCCESS, true);
    291	}
    292}
    293
    294static void macvlan_process_broadcast(struct work_struct *w)
    295{
    296	struct macvlan_port *port = container_of(w, struct macvlan_port,
    297						 bc_work);
    298	struct sk_buff *skb;
    299	struct sk_buff_head list;
    300
    301	__skb_queue_head_init(&list);
    302
    303	spin_lock_bh(&port->bc_queue.lock);
    304	skb_queue_splice_tail_init(&port->bc_queue, &list);
    305	spin_unlock_bh(&port->bc_queue.lock);
    306
    307	while ((skb = __skb_dequeue(&list))) {
    308		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
    309
    310		rcu_read_lock();
    311
    312		if (!src)
    313			/* frame comes from an external address */
    314			macvlan_broadcast(skb, port, NULL,
    315					  MACVLAN_MODE_PRIVATE |
    316					  MACVLAN_MODE_VEPA    |
    317					  MACVLAN_MODE_PASSTHRU|
    318					  MACVLAN_MODE_BRIDGE);
    319		else if (src->mode == MACVLAN_MODE_VEPA)
    320			/* flood to everyone except source */
    321			macvlan_broadcast(skb, port, src->dev,
    322					  MACVLAN_MODE_VEPA |
    323					  MACVLAN_MODE_BRIDGE);
    324		else
    325			/*
    326			 * flood only to VEPA ports, bridge ports
    327			 * already saw the frame on the way out.
    328			 */
    329			macvlan_broadcast(skb, port, src->dev,
    330					  MACVLAN_MODE_VEPA);
    331
    332		rcu_read_unlock();
    333
    334		if (src)
    335			dev_put(src->dev);
    336		consume_skb(skb);
    337
    338		cond_resched();
    339	}
    340}
    341
    342static void macvlan_broadcast_enqueue(struct macvlan_port *port,
    343				      const struct macvlan_dev *src,
    344				      struct sk_buff *skb)
    345{
    346	struct sk_buff *nskb;
    347	int err = -ENOMEM;
    348
    349	nskb = skb_clone(skb, GFP_ATOMIC);
    350	if (!nskb)
    351		goto err;
    352
    353	MACVLAN_SKB_CB(nskb)->src = src;
    354
    355	spin_lock(&port->bc_queue.lock);
    356	if (skb_queue_len(&port->bc_queue) < port->bc_queue_len_used) {
    357		if (src)
    358			dev_hold(src->dev);
    359		__skb_queue_tail(&port->bc_queue, nskb);
    360		err = 0;
    361	}
    362	spin_unlock(&port->bc_queue.lock);
    363
    364	schedule_work(&port->bc_work);
    365
    366	if (err)
    367		goto free_nskb;
    368
    369	return;
    370
    371free_nskb:
    372	kfree_skb(nskb);
    373err:
    374	dev_core_stats_rx_dropped_inc(skb->dev);
    375}
    376
    377static void macvlan_flush_sources(struct macvlan_port *port,
    378				  struct macvlan_dev *vlan)
    379{
    380	struct macvlan_source_entry *entry;
    381	struct hlist_node *next;
    382	int i;
    383
    384	hash_for_each_safe(port->vlan_source_hash, i, next, entry, hlist)
    385		if (entry->vlan == vlan)
    386			macvlan_hash_del_source(entry);
    387
    388	vlan->macaddr_count = 0;
    389}
    390
    391static void macvlan_forward_source_one(struct sk_buff *skb,
    392				       struct macvlan_dev *vlan)
    393{
    394	struct sk_buff *nskb;
    395	struct net_device *dev;
    396	int len;
    397	int ret;
    398
    399	dev = vlan->dev;
    400	if (unlikely(!(dev->flags & IFF_UP)))
    401		return;
    402
    403	nskb = skb_clone(skb, GFP_ATOMIC);
    404	if (!nskb)
    405		return;
    406
    407	len = nskb->len + ETH_HLEN;
    408	nskb->dev = dev;
    409
    410	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
    411		nskb->pkt_type = PACKET_HOST;
    412
    413	ret = __netif_rx(nskb);
    414	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
    415}
    416
    417static bool macvlan_forward_source(struct sk_buff *skb,
    418				   struct macvlan_port *port,
    419				   const unsigned char *addr)
    420{
    421	struct macvlan_source_entry *entry;
    422	u32 idx = macvlan_eth_hash(addr);
    423	struct hlist_head *h = &port->vlan_source_hash[idx];
    424	bool consume = false;
    425
    426	hlist_for_each_entry_rcu(entry, h, hlist) {
    427		if (ether_addr_equal_64bits(entry->addr, addr)) {
    428			if (entry->vlan->flags & MACVLAN_FLAG_NODST)
    429				consume = true;
    430			macvlan_forward_source_one(skb, entry->vlan);
    431		}
    432	}
    433
    434	return consume;
    435}
    436
    437/* called under rcu_read_lock() from netif_receive_skb */
    438static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
    439{
    440	struct macvlan_port *port;
    441	struct sk_buff *skb = *pskb;
    442	const struct ethhdr *eth = eth_hdr(skb);
    443	const struct macvlan_dev *vlan;
    444	const struct macvlan_dev *src;
    445	struct net_device *dev;
    446	unsigned int len = 0;
    447	int ret;
    448	rx_handler_result_t handle_res;
    449
    450	/* Packets from dev_loopback_xmit() do not have L2 header, bail out */
    451	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
    452		return RX_HANDLER_PASS;
    453
    454	port = macvlan_port_get_rcu(skb->dev);
    455	if (is_multicast_ether_addr(eth->h_dest)) {
    456		unsigned int hash;
    457
    458		skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
    459		if (!skb)
    460			return RX_HANDLER_CONSUMED;
    461		*pskb = skb;
    462		eth = eth_hdr(skb);
    463		if (macvlan_forward_source(skb, port, eth->h_source)) {
    464			kfree_skb(skb);
    465			return RX_HANDLER_CONSUMED;
    466		}
    467		src = macvlan_hash_lookup(port, eth->h_source);
    468		if (src && src->mode != MACVLAN_MODE_VEPA &&
    469		    src->mode != MACVLAN_MODE_BRIDGE) {
    470			/* forward to original port. */
    471			vlan = src;
    472			ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
    473			      __netif_rx(skb);
    474			handle_res = RX_HANDLER_CONSUMED;
    475			goto out;
    476		}
    477
    478		hash = mc_hash(NULL, eth->h_dest);
    479		if (test_bit(hash, port->mc_filter))
    480			macvlan_broadcast_enqueue(port, src, skb);
    481
    482		return RX_HANDLER_PASS;
    483	}
    484
    485	if (macvlan_forward_source(skb, port, eth->h_source)) {
    486		kfree_skb(skb);
    487		return RX_HANDLER_CONSUMED;
    488	}
    489	if (macvlan_passthru(port))
    490		vlan = list_first_or_null_rcu(&port->vlans,
    491					      struct macvlan_dev, list);
    492	else
    493		vlan = macvlan_hash_lookup(port, eth->h_dest);
    494	if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE)
    495		return RX_HANDLER_PASS;
    496
    497	dev = vlan->dev;
    498	if (unlikely(!(dev->flags & IFF_UP))) {
    499		kfree_skb(skb);
    500		return RX_HANDLER_CONSUMED;
    501	}
    502	len = skb->len + ETH_HLEN;
    503	skb = skb_share_check(skb, GFP_ATOMIC);
    504	if (!skb) {
    505		ret = NET_RX_DROP;
    506		handle_res = RX_HANDLER_CONSUMED;
    507		goto out;
    508	}
    509
    510	*pskb = skb;
    511	skb->dev = dev;
    512	skb->pkt_type = PACKET_HOST;
    513
    514	ret = NET_RX_SUCCESS;
    515	handle_res = RX_HANDLER_ANOTHER;
    516out:
    517	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
    518	return handle_res;
    519}
    520
    521static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
    522{
    523	const struct macvlan_dev *vlan = netdev_priv(dev);
    524	const struct macvlan_port *port = vlan->port;
    525	const struct macvlan_dev *dest;
    526
    527	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
    528		const struct ethhdr *eth = skb_eth_hdr(skb);
    529
    530		/* send to other bridge ports directly */
    531		if (is_multicast_ether_addr(eth->h_dest)) {
    532			skb_reset_mac_header(skb);
    533			macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
    534			goto xmit_world;
    535		}
    536
    537		dest = macvlan_hash_lookup(port, eth->h_dest);
    538		if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
    539			/* send to lowerdev first for its network taps */
    540			dev_forward_skb(vlan->lowerdev, skb);
    541
    542			return NET_XMIT_SUCCESS;
    543		}
    544	}
    545xmit_world:
    546	skb->dev = vlan->lowerdev;
    547	return dev_queue_xmit_accel(skb,
    548				    netdev_get_sb_channel(dev) ? dev : NULL);
    549}
    550
    551static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
    552{
    553#ifdef CONFIG_NET_POLL_CONTROLLER
    554	return netpoll_send_skb(vlan->netpoll, skb);
    555#else
    556	BUG();
    557	return NETDEV_TX_OK;
    558#endif
    559}
    560
    561static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
    562				      struct net_device *dev)
    563{
    564	struct macvlan_dev *vlan = netdev_priv(dev);
    565	unsigned int len = skb->len;
    566	int ret;
    567
    568	if (unlikely(netpoll_tx_running(dev)))
    569		return macvlan_netpoll_send_skb(vlan, skb);
    570
    571	ret = macvlan_queue_xmit(skb, dev);
    572
    573	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
    574		struct vlan_pcpu_stats *pcpu_stats;
    575
    576		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
    577		u64_stats_update_begin(&pcpu_stats->syncp);
    578		pcpu_stats->tx_packets++;
    579		pcpu_stats->tx_bytes += len;
    580		u64_stats_update_end(&pcpu_stats->syncp);
    581	} else {
    582		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
    583	}
    584	return ret;
    585}
    586
    587static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
    588			       unsigned short type, const void *daddr,
    589			       const void *saddr, unsigned len)
    590{
    591	const struct macvlan_dev *vlan = netdev_priv(dev);
    592	struct net_device *lowerdev = vlan->lowerdev;
    593
    594	return dev_hard_header(skb, lowerdev, type, daddr,
    595			       saddr ? : dev->dev_addr, len);
    596}
    597
    598static const struct header_ops macvlan_hard_header_ops = {
    599	.create  	= macvlan_hard_header,
    600	.parse		= eth_header_parse,
    601	.cache		= eth_header_cache,
    602	.cache_update	= eth_header_cache_update,
    603};
    604
    605static int macvlan_open(struct net_device *dev)
    606{
    607	struct macvlan_dev *vlan = netdev_priv(dev);
    608	struct net_device *lowerdev = vlan->lowerdev;
    609	int err;
    610
    611	if (macvlan_passthru(vlan->port)) {
    612		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
    613			err = dev_set_promiscuity(lowerdev, 1);
    614			if (err < 0)
    615				goto out;
    616		}
    617		goto hash_add;
    618	}
    619
    620	err = -EADDRINUSE;
    621	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
    622		goto out;
    623
    624	/* Attempt to populate accel_priv which is used to offload the L2
    625	 * forwarding requests for unicast packets.
    626	 */
    627	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
    628		vlan->accel_priv =
    629		      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
    630
    631	/* If earlier attempt to offload failed, or accel_priv is not
    632	 * populated we must add the unicast address to the lower device.
    633	 */
    634	if (IS_ERR_OR_NULL(vlan->accel_priv)) {
    635		vlan->accel_priv = NULL;
    636		err = dev_uc_add(lowerdev, dev->dev_addr);
    637		if (err < 0)
    638			goto out;
    639	}
    640
    641	if (dev->flags & IFF_ALLMULTI) {
    642		err = dev_set_allmulti(lowerdev, 1);
    643		if (err < 0)
    644			goto del_unicast;
    645	}
    646
    647	if (dev->flags & IFF_PROMISC) {
    648		err = dev_set_promiscuity(lowerdev, 1);
    649		if (err < 0)
    650			goto clear_multi;
    651	}
    652
    653hash_add:
    654	macvlan_hash_add(vlan);
    655	return 0;
    656
    657clear_multi:
    658	if (dev->flags & IFF_ALLMULTI)
    659		dev_set_allmulti(lowerdev, -1);
    660del_unicast:
    661	if (vlan->accel_priv) {
    662		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
    663							   vlan->accel_priv);
    664		vlan->accel_priv = NULL;
    665	} else {
    666		dev_uc_del(lowerdev, dev->dev_addr);
    667	}
    668out:
    669	return err;
    670}
    671
    672static int macvlan_stop(struct net_device *dev)
    673{
    674	struct macvlan_dev *vlan = netdev_priv(dev);
    675	struct net_device *lowerdev = vlan->lowerdev;
    676
    677	if (vlan->accel_priv) {
    678		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
    679							   vlan->accel_priv);
    680		vlan->accel_priv = NULL;
    681	}
    682
    683	dev_uc_unsync(lowerdev, dev);
    684	dev_mc_unsync(lowerdev, dev);
    685
    686	if (macvlan_passthru(vlan->port)) {
    687		if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
    688			dev_set_promiscuity(lowerdev, -1);
    689		goto hash_del;
    690	}
    691
    692	if (dev->flags & IFF_ALLMULTI)
    693		dev_set_allmulti(lowerdev, -1);
    694
    695	if (dev->flags & IFF_PROMISC)
    696		dev_set_promiscuity(lowerdev, -1);
    697
    698	dev_uc_del(lowerdev, dev->dev_addr);
    699
    700hash_del:
    701	macvlan_hash_del(vlan, !dev->dismantle);
    702	return 0;
    703}
    704
    705static int macvlan_sync_address(struct net_device *dev,
    706				const unsigned char *addr)
    707{
    708	struct macvlan_dev *vlan = netdev_priv(dev);
    709	struct net_device *lowerdev = vlan->lowerdev;
    710	struct macvlan_port *port = vlan->port;
    711	int err;
    712
    713	if (!(dev->flags & IFF_UP)) {
    714		/* Just copy in the new address */
    715		eth_hw_addr_set(dev, addr);
    716	} else {
    717		/* Rehash and update the device filters */
    718		if (macvlan_addr_busy(vlan->port, addr))
    719			return -EADDRINUSE;
    720
    721		if (!macvlan_passthru(port)) {
    722			err = dev_uc_add(lowerdev, addr);
    723			if (err)
    724				return err;
    725
    726			dev_uc_del(lowerdev, dev->dev_addr);
    727		}
    728
    729		macvlan_hash_change_addr(vlan, addr);
    730	}
    731	if (macvlan_passthru(port) && !macvlan_addr_change(port)) {
    732		/* Since addr_change isn't set, we are here due to lower
    733		 * device change.  Save the lower-dev address so we can
    734		 * restore it later.
    735		 */
    736		ether_addr_copy(vlan->port->perm_addr,
    737				lowerdev->dev_addr);
    738	}
    739	macvlan_clear_addr_change(port);
    740	return 0;
    741}
    742
    743static int macvlan_set_mac_address(struct net_device *dev, void *p)
    744{
    745	struct macvlan_dev *vlan = netdev_priv(dev);
    746	struct sockaddr *addr = p;
    747
    748	if (!is_valid_ether_addr(addr->sa_data))
    749		return -EADDRNOTAVAIL;
    750
    751	/* If the addresses are the same, this is a no-op */
    752	if (ether_addr_equal(dev->dev_addr, addr->sa_data))
    753		return 0;
    754
    755	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
    756		macvlan_set_addr_change(vlan->port);
    757		return dev_set_mac_address(vlan->lowerdev, addr, NULL);
    758	}
    759
    760	if (macvlan_addr_busy(vlan->port, addr->sa_data))
    761		return -EADDRINUSE;
    762
    763	return macvlan_sync_address(dev, addr->sa_data);
    764}
    765
    766static void macvlan_change_rx_flags(struct net_device *dev, int change)
    767{
    768	struct macvlan_dev *vlan = netdev_priv(dev);
    769	struct net_device *lowerdev = vlan->lowerdev;
    770
    771	if (dev->flags & IFF_UP) {
    772		if (change & IFF_ALLMULTI)
    773			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
    774		if (change & IFF_PROMISC)
    775			dev_set_promiscuity(lowerdev,
    776					    dev->flags & IFF_PROMISC ? 1 : -1);
    777
    778	}
    779}
    780
    781static void macvlan_compute_filter(unsigned long *mc_filter,
    782				   struct net_device *dev,
    783				   struct macvlan_dev *vlan)
    784{
    785	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
    786		bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ);
    787	} else {
    788		struct netdev_hw_addr *ha;
    789		DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ);
    790
    791		bitmap_zero(filter, MACVLAN_MC_FILTER_SZ);
    792		netdev_for_each_mc_addr(ha, dev) {
    793			__set_bit(mc_hash(vlan, ha->addr), filter);
    794		}
    795
    796		__set_bit(mc_hash(vlan, dev->broadcast), filter);
    797
    798		bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ);
    799	}
    800}
    801
    802static void macvlan_set_mac_lists(struct net_device *dev)
    803{
    804	struct macvlan_dev *vlan = netdev_priv(dev);
    805
    806	macvlan_compute_filter(vlan->mc_filter, dev, vlan);
    807
    808	dev_uc_sync(vlan->lowerdev, dev);
    809	dev_mc_sync(vlan->lowerdev, dev);
    810
    811	/* This is slightly inaccurate as we're including the subscription
    812	 * list of vlan->lowerdev too.
    813	 *
    814	 * Bug alert: This only works if everyone has the same broadcast
    815	 * address as lowerdev.  As soon as someone changes theirs this
    816	 * will break.
    817	 *
    818	 * However, this is already broken as when you change your broadcast
    819	 * address we don't get called.
    820	 *
    821	 * The solution is to maintain a list of broadcast addresses like
    822	 * we do for uc/mc, if you care.
    823	 */
    824	macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL);
    825}
    826
    827static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
    828{
    829	struct macvlan_dev *vlan = netdev_priv(dev);
    830
    831	if (vlan->lowerdev->mtu < new_mtu)
    832		return -EINVAL;
    833	dev->mtu = new_mtu;
    834	return 0;
    835}
    836
    837static int macvlan_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
    838{
    839	struct net_device *real_dev = macvlan_dev_real_dev(dev);
    840	const struct net_device_ops *ops = real_dev->netdev_ops;
    841	struct ifreq ifrr;
    842	int err = -EOPNOTSUPP;
    843
    844	strscpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
    845	ifrr.ifr_ifru = ifr->ifr_ifru;
    846
    847	switch (cmd) {
    848	case SIOCSHWTSTAMP:
    849		if (!net_eq(dev_net(dev), &init_net))
    850			break;
    851		fallthrough;
    852	case SIOCGHWTSTAMP:
    853		if (netif_device_present(real_dev) && ops->ndo_eth_ioctl)
    854			err = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
    855		break;
    856	}
    857
    858	if (!err)
    859		ifr->ifr_ifru = ifrr.ifr_ifru;
    860
    861	return err;
    862}
    863
    864/*
    865 * macvlan network devices have devices nesting below it and are a special
    866 * "super class" of normal network devices; split their locks off into a
    867 * separate class since they always nest.
    868 */
    869static struct lock_class_key macvlan_netdev_addr_lock_key;
    870
    871#define ALWAYS_ON_OFFLOADS \
    872	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
    873	 NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
    874
    875#define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX)
    876
    877#define MACVLAN_FEATURES \
    878	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
    879	 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_LRO | \
    880	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
    881	 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
    882
    883#define MACVLAN_STATE_MASK \
    884	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
    885
    886static void macvlan_set_lockdep_class(struct net_device *dev)
    887{
    888	netdev_lockdep_set_classes(dev);
    889	lockdep_set_class(&dev->addr_list_lock,
    890			  &macvlan_netdev_addr_lock_key);
    891}
    892
    893static int macvlan_init(struct net_device *dev)
    894{
    895	struct macvlan_dev *vlan = netdev_priv(dev);
    896	struct net_device *lowerdev = vlan->lowerdev;
    897	struct macvlan_port *port = vlan->port;
    898
    899	dev->state		= (dev->state & ~MACVLAN_STATE_MASK) |
    900				  (lowerdev->state & MACVLAN_STATE_MASK);
    901	dev->features 		= lowerdev->features & MACVLAN_FEATURES;
    902	dev->features		|= ALWAYS_ON_FEATURES;
    903	dev->hw_features	|= NETIF_F_LRO;
    904	dev->vlan_features	= lowerdev->vlan_features & MACVLAN_FEATURES;
    905	dev->vlan_features	|= ALWAYS_ON_OFFLOADS;
    906	dev->hw_enc_features    |= dev->features;
    907	netif_inherit_tso_max(dev, lowerdev);
    908	dev->hard_header_len	= lowerdev->hard_header_len;
    909	macvlan_set_lockdep_class(dev);
    910
    911	vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
    912	if (!vlan->pcpu_stats)
    913		return -ENOMEM;
    914
    915	port->count += 1;
    916
    917	/* Get macvlan's reference to lowerdev */
    918	dev_hold_track(lowerdev, &vlan->dev_tracker, GFP_KERNEL);
    919
    920	return 0;
    921}
    922
    923static void macvlan_uninit(struct net_device *dev)
    924{
    925	struct macvlan_dev *vlan = netdev_priv(dev);
    926	struct macvlan_port *port = vlan->port;
    927
    928	free_percpu(vlan->pcpu_stats);
    929
    930	macvlan_flush_sources(port, vlan);
    931	port->count -= 1;
    932	if (!port->count)
    933		macvlan_port_destroy(port->dev);
    934}
    935
    936static void macvlan_dev_get_stats64(struct net_device *dev,
    937				    struct rtnl_link_stats64 *stats)
    938{
    939	struct macvlan_dev *vlan = netdev_priv(dev);
    940
    941	if (vlan->pcpu_stats) {
    942		struct vlan_pcpu_stats *p;
    943		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
    944		u32 rx_errors = 0, tx_dropped = 0;
    945		unsigned int start;
    946		int i;
    947
    948		for_each_possible_cpu(i) {
    949			p = per_cpu_ptr(vlan->pcpu_stats, i);
    950			do {
    951				start = u64_stats_fetch_begin_irq(&p->syncp);
    952				rx_packets	= p->rx_packets;
    953				rx_bytes	= p->rx_bytes;
    954				rx_multicast	= p->rx_multicast;
    955				tx_packets	= p->tx_packets;
    956				tx_bytes	= p->tx_bytes;
    957			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
    958
    959			stats->rx_packets	+= rx_packets;
    960			stats->rx_bytes		+= rx_bytes;
    961			stats->multicast	+= rx_multicast;
    962			stats->tx_packets	+= tx_packets;
    963			stats->tx_bytes		+= tx_bytes;
    964			/* rx_errors & tx_dropped are u32, updated
    965			 * without syncp protection.
    966			 */
    967			rx_errors	+= p->rx_errors;
    968			tx_dropped	+= p->tx_dropped;
    969		}
    970		stats->rx_errors	= rx_errors;
    971		stats->rx_dropped	= rx_errors;
    972		stats->tx_dropped	= tx_dropped;
    973	}
    974}
    975
    976static int macvlan_vlan_rx_add_vid(struct net_device *dev,
    977				   __be16 proto, u16 vid)
    978{
    979	struct macvlan_dev *vlan = netdev_priv(dev);
    980	struct net_device *lowerdev = vlan->lowerdev;
    981
    982	return vlan_vid_add(lowerdev, proto, vid);
    983}
    984
    985static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
    986				    __be16 proto, u16 vid)
    987{
    988	struct macvlan_dev *vlan = netdev_priv(dev);
    989	struct net_device *lowerdev = vlan->lowerdev;
    990
    991	vlan_vid_del(lowerdev, proto, vid);
    992	return 0;
    993}
    994
    995static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
    996			   struct net_device *dev,
    997			   const unsigned char *addr, u16 vid,
    998			   u16 flags,
    999			   struct netlink_ext_ack *extack)
   1000{
   1001	struct macvlan_dev *vlan = netdev_priv(dev);
   1002	int err = -EINVAL;
   1003
   1004	/* Support unicast filter only on passthru devices.
   1005	 * Multicast filter should be allowed on all devices.
   1006	 */
   1007	if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
   1008		return -EOPNOTSUPP;
   1009
   1010	if (flags & NLM_F_REPLACE)
   1011		return -EOPNOTSUPP;
   1012
   1013	if (is_unicast_ether_addr(addr))
   1014		err = dev_uc_add_excl(dev, addr);
   1015	else if (is_multicast_ether_addr(addr))
   1016		err = dev_mc_add_excl(dev, addr);
   1017
   1018	return err;
   1019}
   1020
   1021static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
   1022			   struct net_device *dev,
   1023			   const unsigned char *addr, u16 vid,
   1024			   struct netlink_ext_ack *extack)
   1025{
   1026	struct macvlan_dev *vlan = netdev_priv(dev);
   1027	int err = -EINVAL;
   1028
   1029	/* Support unicast filter only on passthru devices.
   1030	 * Multicast filter should be allowed on all devices.
   1031	 */
   1032	if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr))
   1033		return -EOPNOTSUPP;
   1034
   1035	if (is_unicast_ether_addr(addr))
   1036		err = dev_uc_del(dev, addr);
   1037	else if (is_multicast_ether_addr(addr))
   1038		err = dev_mc_del(dev, addr);
   1039
   1040	return err;
   1041}
   1042
   1043static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
   1044					struct ethtool_drvinfo *drvinfo)
   1045{
   1046	strlcpy(drvinfo->driver, "macvlan", sizeof(drvinfo->driver));
   1047	strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version));
   1048}
   1049
   1050static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
   1051					      struct ethtool_link_ksettings *cmd)
   1052{
   1053	const struct macvlan_dev *vlan = netdev_priv(dev);
   1054
   1055	return __ethtool_get_link_ksettings(vlan->lowerdev, cmd);
   1056}
   1057
   1058static int macvlan_ethtool_get_ts_info(struct net_device *dev,
   1059				       struct ethtool_ts_info *info)
   1060{
   1061	struct net_device *real_dev = macvlan_dev_real_dev(dev);
   1062	const struct ethtool_ops *ops = real_dev->ethtool_ops;
   1063	struct phy_device *phydev = real_dev->phydev;
   1064
   1065	if (phy_has_tsinfo(phydev)) {
   1066		return phy_ts_info(phydev, info);
   1067	} else if (ops->get_ts_info) {
   1068		return ops->get_ts_info(real_dev, info);
   1069	} else {
   1070		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
   1071			SOF_TIMESTAMPING_SOFTWARE;
   1072		info->phc_index = -1;
   1073	}
   1074
   1075	return 0;
   1076}
   1077
   1078static netdev_features_t macvlan_fix_features(struct net_device *dev,
   1079					      netdev_features_t features)
   1080{
   1081	struct macvlan_dev *vlan = netdev_priv(dev);
   1082	netdev_features_t lowerdev_features = vlan->lowerdev->features;
   1083	netdev_features_t mask;
   1084
   1085	features |= NETIF_F_ALL_FOR_ALL;
   1086	features &= (vlan->set_features | ~MACVLAN_FEATURES);
   1087	mask = features;
   1088
   1089	lowerdev_features &= (features | ~NETIF_F_LRO);
   1090	features = netdev_increment_features(lowerdev_features, features, mask);
   1091	features |= ALWAYS_ON_FEATURES;
   1092	features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
   1093
   1094	return features;
   1095}
   1096
   1097#ifdef CONFIG_NET_POLL_CONTROLLER
   1098static void macvlan_dev_poll_controller(struct net_device *dev)
   1099{
   1100	return;
   1101}
   1102
   1103static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
   1104{
   1105	struct macvlan_dev *vlan = netdev_priv(dev);
   1106	struct net_device *real_dev = vlan->lowerdev;
   1107	struct netpoll *netpoll;
   1108	int err;
   1109
   1110	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
   1111	err = -ENOMEM;
   1112	if (!netpoll)
   1113		goto out;
   1114
   1115	err = __netpoll_setup(netpoll, real_dev);
   1116	if (err) {
   1117		kfree(netpoll);
   1118		goto out;
   1119	}
   1120
   1121	vlan->netpoll = netpoll;
   1122
   1123out:
   1124	return err;
   1125}
   1126
   1127static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
   1128{
   1129	struct macvlan_dev *vlan = netdev_priv(dev);
   1130	struct netpoll *netpoll = vlan->netpoll;
   1131
   1132	if (!netpoll)
   1133		return;
   1134
   1135	vlan->netpoll = NULL;
   1136
   1137	__netpoll_free(netpoll);
   1138}
   1139#endif	/* CONFIG_NET_POLL_CONTROLLER */
   1140
   1141static int macvlan_dev_get_iflink(const struct net_device *dev)
   1142{
   1143	struct macvlan_dev *vlan = netdev_priv(dev);
   1144
   1145	return vlan->lowerdev->ifindex;
   1146}
   1147
   1148static const struct ethtool_ops macvlan_ethtool_ops = {
   1149	.get_link		= ethtool_op_get_link,
   1150	.get_link_ksettings	= macvlan_ethtool_get_link_ksettings,
   1151	.get_drvinfo		= macvlan_ethtool_get_drvinfo,
   1152	.get_ts_info		= macvlan_ethtool_get_ts_info,
   1153};
   1154
   1155static const struct net_device_ops macvlan_netdev_ops = {
   1156	.ndo_init		= macvlan_init,
   1157	.ndo_uninit		= macvlan_uninit,
   1158	.ndo_open		= macvlan_open,
   1159	.ndo_stop		= macvlan_stop,
   1160	.ndo_start_xmit		= macvlan_start_xmit,
   1161	.ndo_change_mtu		= macvlan_change_mtu,
   1162	.ndo_eth_ioctl		= macvlan_eth_ioctl,
   1163	.ndo_fix_features	= macvlan_fix_features,
   1164	.ndo_change_rx_flags	= macvlan_change_rx_flags,
   1165	.ndo_set_mac_address	= macvlan_set_mac_address,
   1166	.ndo_set_rx_mode	= macvlan_set_mac_lists,
   1167	.ndo_get_stats64	= macvlan_dev_get_stats64,
   1168	.ndo_validate_addr	= eth_validate_addr,
   1169	.ndo_vlan_rx_add_vid	= macvlan_vlan_rx_add_vid,
   1170	.ndo_vlan_rx_kill_vid	= macvlan_vlan_rx_kill_vid,
   1171	.ndo_fdb_add		= macvlan_fdb_add,
   1172	.ndo_fdb_del		= macvlan_fdb_del,
   1173	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
   1174#ifdef CONFIG_NET_POLL_CONTROLLER
   1175	.ndo_poll_controller	= macvlan_dev_poll_controller,
   1176	.ndo_netpoll_setup	= macvlan_dev_netpoll_setup,
   1177	.ndo_netpoll_cleanup	= macvlan_dev_netpoll_cleanup,
   1178#endif
   1179	.ndo_get_iflink		= macvlan_dev_get_iflink,
   1180	.ndo_features_check	= passthru_features_check,
   1181};
   1182
   1183static void macvlan_dev_free(struct net_device *dev)
   1184{
   1185	struct macvlan_dev *vlan = netdev_priv(dev);
   1186
   1187	/* Get rid of the macvlan's reference to lowerdev */
   1188	dev_put_track(vlan->lowerdev, &vlan->dev_tracker);
   1189}
   1190
   1191void macvlan_common_setup(struct net_device *dev)
   1192{
   1193	ether_setup(dev);
   1194
   1195	dev->min_mtu		= 0;
   1196	dev->max_mtu		= ETH_MAX_MTU;
   1197	dev->priv_flags	       &= ~IFF_TX_SKB_SHARING;
   1198	netif_keep_dst(dev);
   1199	dev->priv_flags	       |= IFF_UNICAST_FLT | IFF_CHANGE_PROTO_DOWN;
   1200	dev->netdev_ops		= &macvlan_netdev_ops;
   1201	dev->needs_free_netdev	= true;
   1202	dev->priv_destructor	= macvlan_dev_free;
   1203	dev->header_ops		= &macvlan_hard_header_ops;
   1204	dev->ethtool_ops	= &macvlan_ethtool_ops;
   1205}
   1206EXPORT_SYMBOL_GPL(macvlan_common_setup);
   1207
   1208static void macvlan_setup(struct net_device *dev)
   1209{
   1210	macvlan_common_setup(dev);
   1211	dev->priv_flags |= IFF_NO_QUEUE;
   1212}
   1213
   1214static int macvlan_port_create(struct net_device *dev)
   1215{
   1216	struct macvlan_port *port;
   1217	unsigned int i;
   1218	int err;
   1219
   1220	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
   1221		return -EINVAL;
   1222
   1223	if (netdev_is_rx_handler_busy(dev))
   1224		return -EBUSY;
   1225
   1226	port = kzalloc(sizeof(*port), GFP_KERNEL);
   1227	if (port == NULL)
   1228		return -ENOMEM;
   1229
   1230	port->dev = dev;
   1231	ether_addr_copy(port->perm_addr, dev->dev_addr);
   1232	INIT_LIST_HEAD(&port->vlans);
   1233	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
   1234		INIT_HLIST_HEAD(&port->vlan_hash[i]);
   1235	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
   1236		INIT_HLIST_HEAD(&port->vlan_source_hash[i]);
   1237
   1238	port->bc_queue_len_used = 0;
   1239	skb_queue_head_init(&port->bc_queue);
   1240	INIT_WORK(&port->bc_work, macvlan_process_broadcast);
   1241
   1242	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
   1243	if (err)
   1244		kfree(port);
   1245	else
   1246		dev->priv_flags |= IFF_MACVLAN_PORT;
   1247	return err;
   1248}
   1249
   1250static void macvlan_port_destroy(struct net_device *dev)
   1251{
   1252	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
   1253	struct sk_buff *skb;
   1254
   1255	dev->priv_flags &= ~IFF_MACVLAN_PORT;
   1256	netdev_rx_handler_unregister(dev);
   1257
   1258	/* After this point, no packet can schedule bc_work anymore,
   1259	 * but we need to cancel it and purge left skbs if any.
   1260	 */
   1261	cancel_work_sync(&port->bc_work);
   1262
   1263	while ((skb = __skb_dequeue(&port->bc_queue))) {
   1264		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
   1265
   1266		if (src)
   1267			dev_put(src->dev);
   1268
   1269		kfree_skb(skb);
   1270	}
   1271
   1272	/* If the lower device address has been changed by passthru
   1273	 * macvlan, put it back.
   1274	 */
   1275	if (macvlan_passthru(port) &&
   1276	    !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) {
   1277		struct sockaddr sa;
   1278
   1279		sa.sa_family = port->dev->type;
   1280		memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
   1281		dev_set_mac_address(port->dev, &sa, NULL);
   1282	}
   1283
   1284	kfree(port);
   1285}
   1286
   1287static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
   1288			    struct netlink_ext_ack *extack)
   1289{
   1290	struct nlattr *nla, *head;
   1291	int rem, len;
   1292
   1293	if (tb[IFLA_ADDRESS]) {
   1294		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
   1295			return -EINVAL;
   1296		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
   1297			return -EADDRNOTAVAIL;
   1298	}
   1299
   1300	if (!data)
   1301		return 0;
   1302
   1303	if (data[IFLA_MACVLAN_FLAGS] &&
   1304	    nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~(MACVLAN_FLAG_NOPROMISC |
   1305						      MACVLAN_FLAG_NODST))
   1306		return -EINVAL;
   1307
   1308	if (data[IFLA_MACVLAN_MODE]) {
   1309		switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
   1310		case MACVLAN_MODE_PRIVATE:
   1311		case MACVLAN_MODE_VEPA:
   1312		case MACVLAN_MODE_BRIDGE:
   1313		case MACVLAN_MODE_PASSTHRU:
   1314		case MACVLAN_MODE_SOURCE:
   1315			break;
   1316		default:
   1317			return -EINVAL;
   1318		}
   1319	}
   1320
   1321	if (data[IFLA_MACVLAN_MACADDR_MODE]) {
   1322		switch (nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE])) {
   1323		case MACVLAN_MACADDR_ADD:
   1324		case MACVLAN_MACADDR_DEL:
   1325		case MACVLAN_MACADDR_FLUSH:
   1326		case MACVLAN_MACADDR_SET:
   1327			break;
   1328		default:
   1329			return -EINVAL;
   1330		}
   1331	}
   1332
   1333	if (data[IFLA_MACVLAN_MACADDR]) {
   1334		if (nla_len(data[IFLA_MACVLAN_MACADDR]) != ETH_ALEN)
   1335			return -EINVAL;
   1336
   1337		if (!is_valid_ether_addr(nla_data(data[IFLA_MACVLAN_MACADDR])))
   1338			return -EADDRNOTAVAIL;
   1339	}
   1340
   1341	if (data[IFLA_MACVLAN_MACADDR_DATA]) {
   1342		head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
   1343		len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
   1344
   1345		nla_for_each_attr(nla, head, len, rem) {
   1346			if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
   1347			    nla_len(nla) != ETH_ALEN)
   1348				return -EINVAL;
   1349
   1350			if (!is_valid_ether_addr(nla_data(nla)))
   1351				return -EADDRNOTAVAIL;
   1352		}
   1353	}
   1354
   1355	if (data[IFLA_MACVLAN_MACADDR_COUNT])
   1356		return -EINVAL;
   1357
   1358	return 0;
   1359}
   1360
   1361/*
   1362 * reconfigure list of remote source mac address
   1363 * (only for macvlan devices in source mode)
   1364 * Note regarding alignment: all netlink data is aligned to 4 Byte, which
   1365 * suffices for both ether_addr_copy and ether_addr_equal_64bits usage.
   1366 */
   1367static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
   1368				      struct nlattr *data[])
   1369{
   1370	char *addr = NULL;
   1371	int ret, rem, len;
   1372	struct nlattr *nla, *head;
   1373	struct macvlan_source_entry *entry;
   1374
   1375	if (data[IFLA_MACVLAN_MACADDR])
   1376		addr = nla_data(data[IFLA_MACVLAN_MACADDR]);
   1377
   1378	if (mode == MACVLAN_MACADDR_ADD) {
   1379		if (!addr)
   1380			return -EINVAL;
   1381
   1382		return macvlan_hash_add_source(vlan, addr);
   1383
   1384	} else if (mode == MACVLAN_MACADDR_DEL) {
   1385		if (!addr)
   1386			return -EINVAL;
   1387
   1388		entry = macvlan_hash_lookup_source(vlan, addr);
   1389		if (entry) {
   1390			macvlan_hash_del_source(entry);
   1391			vlan->macaddr_count--;
   1392		}
   1393	} else if (mode == MACVLAN_MACADDR_FLUSH) {
   1394		macvlan_flush_sources(vlan->port, vlan);
   1395	} else if (mode == MACVLAN_MACADDR_SET) {
   1396		macvlan_flush_sources(vlan->port, vlan);
   1397
   1398		if (addr) {
   1399			ret = macvlan_hash_add_source(vlan, addr);
   1400			if (ret)
   1401				return ret;
   1402		}
   1403
   1404		if (!data[IFLA_MACVLAN_MACADDR_DATA])
   1405			return 0;
   1406
   1407		head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
   1408		len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
   1409
   1410		nla_for_each_attr(nla, head, len, rem) {
   1411			addr = nla_data(nla);
   1412			ret = macvlan_hash_add_source(vlan, addr);
   1413			if (ret)
   1414				return ret;
   1415		}
   1416	} else {
   1417		return -EINVAL;
   1418	}
   1419
   1420	return 0;
   1421}
   1422
   1423int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
   1424			   struct nlattr *tb[], struct nlattr *data[],
   1425			   struct netlink_ext_ack *extack)
   1426{
   1427	struct macvlan_dev *vlan = netdev_priv(dev);
   1428	struct macvlan_port *port;
   1429	struct net_device *lowerdev;
   1430	int err;
   1431	int macmode;
   1432	bool create = false;
   1433
   1434	if (!tb[IFLA_LINK])
   1435		return -EINVAL;
   1436
   1437	lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
   1438	if (lowerdev == NULL)
   1439		return -ENODEV;
   1440
   1441	/* When creating macvlans or macvtaps on top of other macvlans - use
   1442	 * the real device as the lowerdev.
   1443	 */
   1444	if (netif_is_macvlan(lowerdev))
   1445		lowerdev = macvlan_dev_real_dev(lowerdev);
   1446
   1447	if (!tb[IFLA_MTU])
   1448		dev->mtu = lowerdev->mtu;
   1449	else if (dev->mtu > lowerdev->mtu)
   1450		return -EINVAL;
   1451
   1452	/* MTU range: 68 - lowerdev->max_mtu */
   1453	dev->min_mtu = ETH_MIN_MTU;
   1454	dev->max_mtu = lowerdev->max_mtu;
   1455
   1456	if (!tb[IFLA_ADDRESS])
   1457		eth_hw_addr_random(dev);
   1458
   1459	if (!netif_is_macvlan_port(lowerdev)) {
   1460		err = macvlan_port_create(lowerdev);
   1461		if (err < 0)
   1462			return err;
   1463		create = true;
   1464	}
   1465	port = macvlan_port_get_rtnl(lowerdev);
   1466
   1467	/* Only 1 macvlan device can be created in passthru mode */
   1468	if (macvlan_passthru(port)) {
   1469		/* The macvlan port must be not created this time,
   1470		 * still goto destroy_macvlan_port for readability.
   1471		 */
   1472		err = -EINVAL;
   1473		goto destroy_macvlan_port;
   1474	}
   1475
   1476	vlan->lowerdev = lowerdev;
   1477	vlan->dev      = dev;
   1478	vlan->port     = port;
   1479	vlan->set_features = MACVLAN_FEATURES;
   1480
   1481	vlan->mode     = MACVLAN_MODE_VEPA;
   1482	if (data && data[IFLA_MACVLAN_MODE])
   1483		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
   1484
   1485	if (data && data[IFLA_MACVLAN_FLAGS])
   1486		vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
   1487
   1488	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
   1489		if (port->count) {
   1490			err = -EINVAL;
   1491			goto destroy_macvlan_port;
   1492		}
   1493		macvlan_set_passthru(port);
   1494		eth_hw_addr_inherit(dev, lowerdev);
   1495	}
   1496
   1497	if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
   1498		if (vlan->mode != MACVLAN_MODE_SOURCE) {
   1499			err = -EINVAL;
   1500			goto destroy_macvlan_port;
   1501		}
   1502		macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
   1503		err = macvlan_changelink_sources(vlan, macmode, data);
   1504		if (err)
   1505			goto destroy_macvlan_port;
   1506	}
   1507
   1508	vlan->bc_queue_len_req = MACVLAN_DEFAULT_BC_QUEUE_LEN;
   1509	if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN])
   1510		vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]);
   1511
   1512	err = register_netdevice(dev);
   1513	if (err < 0)
   1514		goto destroy_macvlan_port;
   1515
   1516	dev->priv_flags |= IFF_MACVLAN;
   1517	err = netdev_upper_dev_link(lowerdev, dev, extack);
   1518	if (err)
   1519		goto unregister_netdev;
   1520
   1521	list_add_tail_rcu(&vlan->list, &port->vlans);
   1522	update_port_bc_queue_len(vlan->port);
   1523	netif_stacked_transfer_operstate(lowerdev, dev);
   1524	linkwatch_fire_event(dev);
   1525
   1526	return 0;
   1527
   1528unregister_netdev:
   1529	/* macvlan_uninit would free the macvlan port */
   1530	unregister_netdevice(dev);
   1531	return err;
   1532destroy_macvlan_port:
   1533	/* the macvlan port may be freed by macvlan_uninit when fail to register.
   1534	 * so we destroy the macvlan port only when it's valid.
   1535	 */
   1536	if (create && macvlan_port_get_rtnl(lowerdev))
   1537		macvlan_port_destroy(port->dev);
   1538	return err;
   1539}
   1540EXPORT_SYMBOL_GPL(macvlan_common_newlink);
   1541
   1542static int macvlan_newlink(struct net *src_net, struct net_device *dev,
   1543			   struct nlattr *tb[], struct nlattr *data[],
   1544			   struct netlink_ext_ack *extack)
   1545{
   1546	return macvlan_common_newlink(src_net, dev, tb, data, extack);
   1547}
   1548
   1549void macvlan_dellink(struct net_device *dev, struct list_head *head)
   1550{
   1551	struct macvlan_dev *vlan = netdev_priv(dev);
   1552
   1553	if (vlan->mode == MACVLAN_MODE_SOURCE)
   1554		macvlan_flush_sources(vlan->port, vlan);
   1555	list_del_rcu(&vlan->list);
   1556	update_port_bc_queue_len(vlan->port);
   1557	unregister_netdevice_queue(dev, head);
   1558	netdev_upper_dev_unlink(vlan->lowerdev, dev);
   1559}
   1560EXPORT_SYMBOL_GPL(macvlan_dellink);
   1561
   1562static int macvlan_changelink(struct net_device *dev,
   1563			      struct nlattr *tb[], struct nlattr *data[],
   1564			      struct netlink_ext_ack *extack)
   1565{
   1566	struct macvlan_dev *vlan = netdev_priv(dev);
   1567	enum macvlan_mode mode;
   1568	bool set_mode = false;
   1569	enum macvlan_macaddr_mode macmode;
   1570	int ret;
   1571
   1572	/* Validate mode, but don't set yet: setting flags may fail. */
   1573	if (data && data[IFLA_MACVLAN_MODE]) {
   1574		set_mode = true;
   1575		mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
   1576		/* Passthrough mode can't be set or cleared dynamically */
   1577		if ((mode == MACVLAN_MODE_PASSTHRU) !=
   1578		    (vlan->mode == MACVLAN_MODE_PASSTHRU))
   1579			return -EINVAL;
   1580		if (vlan->mode == MACVLAN_MODE_SOURCE &&
   1581		    vlan->mode != mode)
   1582			macvlan_flush_sources(vlan->port, vlan);
   1583	}
   1584
   1585	if (data && data[IFLA_MACVLAN_FLAGS]) {
   1586		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
   1587		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
   1588		if (macvlan_passthru(vlan->port) && promisc) {
   1589			int err;
   1590
   1591			if (flags & MACVLAN_FLAG_NOPROMISC)
   1592				err = dev_set_promiscuity(vlan->lowerdev, -1);
   1593			else
   1594				err = dev_set_promiscuity(vlan->lowerdev, 1);
   1595			if (err < 0)
   1596				return err;
   1597		}
   1598		vlan->flags = flags;
   1599	}
   1600
   1601	if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) {
   1602		vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]);
   1603		update_port_bc_queue_len(vlan->port);
   1604	}
   1605
   1606	if (set_mode)
   1607		vlan->mode = mode;
   1608	if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
   1609		if (vlan->mode != MACVLAN_MODE_SOURCE)
   1610			return -EINVAL;
   1611		macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]);
   1612		ret = macvlan_changelink_sources(vlan, macmode, data);
   1613		if (ret)
   1614			return ret;
   1615	}
   1616	return 0;
   1617}
   1618
   1619static size_t macvlan_get_size_mac(const struct macvlan_dev *vlan)
   1620{
   1621	if (vlan->macaddr_count == 0)
   1622		return 0;
   1623	return nla_total_size(0) /* IFLA_MACVLAN_MACADDR_DATA */
   1624		+ vlan->macaddr_count * nla_total_size(sizeof(u8) * ETH_ALEN);
   1625}
   1626
   1627static size_t macvlan_get_size(const struct net_device *dev)
   1628{
   1629	struct macvlan_dev *vlan = netdev_priv(dev);
   1630
   1631	return (0
   1632		+ nla_total_size(4) /* IFLA_MACVLAN_MODE */
   1633		+ nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
   1634		+ nla_total_size(4) /* IFLA_MACVLAN_MACADDR_COUNT */
   1635		+ macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */
   1636		+ nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN */
   1637		+ nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN_USED */
   1638		);
   1639}
   1640
   1641static int macvlan_fill_info_macaddr(struct sk_buff *skb,
   1642				     const struct macvlan_dev *vlan,
   1643				     const int i)
   1644{
   1645	struct hlist_head *h = &vlan->port->vlan_source_hash[i];
   1646	struct macvlan_source_entry *entry;
   1647
   1648	hlist_for_each_entry_rcu(entry, h, hlist) {
   1649		if (entry->vlan != vlan)
   1650			continue;
   1651		if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr))
   1652			return 1;
   1653	}
   1654	return 0;
   1655}
   1656
   1657static int macvlan_fill_info(struct sk_buff *skb,
   1658				const struct net_device *dev)
   1659{
   1660	struct macvlan_dev *vlan = netdev_priv(dev);
   1661	struct macvlan_port *port = vlan->port;
   1662	int i;
   1663	struct nlattr *nest;
   1664
   1665	if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
   1666		goto nla_put_failure;
   1667	if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
   1668		goto nla_put_failure;
   1669	if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count))
   1670		goto nla_put_failure;
   1671	if (vlan->macaddr_count > 0) {
   1672		nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA);
   1673		if (nest == NULL)
   1674			goto nla_put_failure;
   1675
   1676		for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
   1677			if (macvlan_fill_info_macaddr(skb, vlan, i))
   1678				goto nla_put_failure;
   1679		}
   1680		nla_nest_end(skb, nest);
   1681	}
   1682	if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN, vlan->bc_queue_len_req))
   1683		goto nla_put_failure;
   1684	if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN_USED, port->bc_queue_len_used))
   1685		goto nla_put_failure;
   1686	return 0;
   1687
   1688nla_put_failure:
   1689	return -EMSGSIZE;
   1690}
   1691
   1692static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
   1693	[IFLA_MACVLAN_MODE]  = { .type = NLA_U32 },
   1694	[IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
   1695	[IFLA_MACVLAN_MACADDR_MODE] = { .type = NLA_U32 },
   1696	[IFLA_MACVLAN_MACADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
   1697	[IFLA_MACVLAN_MACADDR_DATA] = { .type = NLA_NESTED },
   1698	[IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 },
   1699	[IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 },
   1700	[IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT },
   1701};
   1702
   1703int macvlan_link_register(struct rtnl_link_ops *ops)
   1704{
   1705	/* common fields */
   1706	ops->validate		= macvlan_validate;
   1707	ops->maxtype		= IFLA_MACVLAN_MAX;
   1708	ops->policy		= macvlan_policy;
   1709	ops->changelink		= macvlan_changelink;
   1710	ops->get_size		= macvlan_get_size;
   1711	ops->fill_info		= macvlan_fill_info;
   1712
   1713	return rtnl_link_register(ops);
   1714};
   1715EXPORT_SYMBOL_GPL(macvlan_link_register);
   1716
   1717static struct net *macvlan_get_link_net(const struct net_device *dev)
   1718{
   1719	return dev_net(macvlan_dev_real_dev(dev));
   1720}
   1721
   1722static struct rtnl_link_ops macvlan_link_ops = {
   1723	.kind		= "macvlan",
   1724	.setup		= macvlan_setup,
   1725	.newlink	= macvlan_newlink,
   1726	.dellink	= macvlan_dellink,
   1727	.get_link_net	= macvlan_get_link_net,
   1728	.priv_size      = sizeof(struct macvlan_dev),
   1729};
   1730
   1731static void update_port_bc_queue_len(struct macvlan_port *port)
   1732{
   1733	u32 max_bc_queue_len_req = 0;
   1734	struct macvlan_dev *vlan;
   1735
   1736	list_for_each_entry(vlan, &port->vlans, list) {
   1737		if (vlan->bc_queue_len_req > max_bc_queue_len_req)
   1738			max_bc_queue_len_req = vlan->bc_queue_len_req;
   1739	}
   1740	port->bc_queue_len_used = max_bc_queue_len_req;
   1741}
   1742
   1743static int macvlan_device_event(struct notifier_block *unused,
   1744				unsigned long event, void *ptr)
   1745{
   1746	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
   1747	struct macvlan_dev *vlan, *next;
   1748	struct macvlan_port *port;
   1749	LIST_HEAD(list_kill);
   1750
   1751	if (!netif_is_macvlan_port(dev))
   1752		return NOTIFY_DONE;
   1753
   1754	port = macvlan_port_get_rtnl(dev);
   1755
   1756	switch (event) {
   1757	case NETDEV_UP:
   1758	case NETDEV_DOWN:
   1759	case NETDEV_CHANGE:
   1760		list_for_each_entry(vlan, &port->vlans, list)
   1761			netif_stacked_transfer_operstate(vlan->lowerdev,
   1762							 vlan->dev);
   1763		break;
   1764	case NETDEV_FEAT_CHANGE:
   1765		list_for_each_entry(vlan, &port->vlans, list) {
   1766			netif_inherit_tso_max(vlan->dev, dev);
   1767			netdev_update_features(vlan->dev);
   1768		}
   1769		break;
   1770	case NETDEV_CHANGEMTU:
   1771		list_for_each_entry(vlan, &port->vlans, list) {
   1772			if (vlan->dev->mtu <= dev->mtu)
   1773				continue;
   1774			dev_set_mtu(vlan->dev, dev->mtu);
   1775		}
   1776		break;
   1777	case NETDEV_CHANGEADDR:
   1778		if (!macvlan_passthru(port))
   1779			return NOTIFY_DONE;
   1780
   1781		vlan = list_first_entry_or_null(&port->vlans,
   1782						struct macvlan_dev,
   1783						list);
   1784
   1785		if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr))
   1786			return NOTIFY_BAD;
   1787
   1788		break;
   1789	case NETDEV_UNREGISTER:
   1790		/* twiddle thumbs on netns device moves */
   1791		if (dev->reg_state != NETREG_UNREGISTERING)
   1792			break;
   1793
   1794		list_for_each_entry_safe(vlan, next, &port->vlans, list)
   1795			vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
   1796		unregister_netdevice_many(&list_kill);
   1797		break;
   1798	case NETDEV_PRE_TYPE_CHANGE:
   1799		/* Forbid underlying device to change its type. */
   1800		return NOTIFY_BAD;
   1801
   1802	case NETDEV_NOTIFY_PEERS:
   1803	case NETDEV_BONDING_FAILOVER:
   1804	case NETDEV_RESEND_IGMP:
   1805		/* Propagate to all vlans */
   1806		list_for_each_entry(vlan, &port->vlans, list)
   1807			call_netdevice_notifiers(event, vlan->dev);
   1808	}
   1809	return NOTIFY_DONE;
   1810}
   1811
   1812static struct notifier_block macvlan_notifier_block __read_mostly = {
   1813	.notifier_call	= macvlan_device_event,
   1814};
   1815
   1816static int __init macvlan_init_module(void)
   1817{
   1818	int err;
   1819
   1820	register_netdevice_notifier(&macvlan_notifier_block);
   1821
   1822	err = macvlan_link_register(&macvlan_link_ops);
   1823	if (err < 0)
   1824		goto err1;
   1825	return 0;
   1826err1:
   1827	unregister_netdevice_notifier(&macvlan_notifier_block);
   1828	return err;
   1829}
   1830
   1831static void __exit macvlan_cleanup_module(void)
   1832{
   1833	rtnl_link_unregister(&macvlan_link_ops);
   1834	unregister_netdevice_notifier(&macvlan_notifier_block);
   1835}
   1836
   1837module_init(macvlan_init_module);
   1838module_exit(macvlan_cleanup_module);
   1839
   1840MODULE_LICENSE("GPL");
   1841MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
   1842MODULE_DESCRIPTION("Driver for MAC address based VLANs");
   1843MODULE_ALIAS_RTNL_LINK("macvlan");
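
For orientation, below is a minimal userspace sketch (not part of this file or repository) of how the rtnetlink interface parsed by macvlan_validate()/macvlan_common_newlink() above is typically exercised: it creates a macvlan device in bridge mode on top of a lower device via RTM_NEWLINK, nesting IFLA_MACVLAN_MODE inside IFLA_LINKINFO/IFLA_INFO_DATA. The lower device "eth0", the new interface name "macvlan0", the choice of bridge mode, and the use of libmnl are assumptions for illustration; error handling is minimal.

/* Hypothetical example: create "macvlan0" on top of "eth0" in bridge mode.
 * Build with: cc macvlan-add.c -lmnl
 */
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <libmnl/libmnl.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *linkinfo, *data;
	unsigned int seq, portid;
	int ret;

	/* RTM_NEWLINK request asking the kernel to create a new link */
	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_NEWLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
	nlh->nlmsg_seq = seq = time(NULL);

	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = AF_UNSPEC;

	/* lower device (IFLA_LINK) and name of the macvlan to create */
	mnl_attr_put_u32(nlh, IFLA_LINK, if_nametoindex("eth0"));
	mnl_attr_put_strz(nlh, IFLA_IFNAME, "macvlan0");

	/* IFLA_LINKINFO { IFLA_INFO_KIND = "macvlan",
	 *                 IFLA_INFO_DATA { IFLA_MACVLAN_MODE } } */
	linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
	mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "macvlan");
	data = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
	mnl_attr_put_u32(nlh, IFLA_MACVLAN_MODE, MACVLAN_MODE_BRIDGE);
	mnl_attr_nest_end(nlh, data);
	mnl_attr_nest_end(nlh, linkinfo);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
		perror("mnl_socket");
		return 1;
	}
	portid = mnl_socket_get_portid(nl);

	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
		perror("mnl_socket_sendto");
		return 1;
	}

	/* read back the ACK (or error) from the kernel */
	ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	if (ret > 0)
		ret = mnl_cb_run(buf, ret, seq, portid, NULL, NULL);
	if (ret < 0)
		perror("RTM_NEWLINK");

	mnl_socket_close(nl);
	return ret < 0;
}

The same attributes can be set with the iproute2 front end; the sketch only makes explicit which netlink attributes end up in the data[] array handled by macvlan_common_newlink() and macvlan_changelink() in the file above.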