cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

team.c (74674B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return netif_is_team_port(dev) ? port : NULL;
}

/*
 * Since the ability to change the device address of an open port device is
 * tested in team_port_add, this function can be called without checking the
 * return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr_storage addr;

	memcpy(addr.__data, dev_addr, port_dev->addr_len);
	addr.ss_family = port_dev->type;
	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}


/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}

	}
	return 0;
}

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
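
/* Editorial sketch (not part of the driver): a team mode would typically
 * register its own options from its init op, in the same way team_init()
 * registers the core team_options[] below. Names here are hypothetical:
 *
 *	static const struct team_option my_options[] = {
 *		{
 *			.name = "my_u32_opt",
 *			.type = TEAM_OPTION_TYPE_U32,
 *			.getter = my_u32_opt_get,
 *			.setter = my_u32_opt_set,
 *		},
 *	};
 *
 *	static int my_mode_init(struct team *team)
 *	{
 *		return team_options_register(team, my_options,
 *					     ARRAY_SIZE(my_options));
 *	}
 *
 * with a matching team_options_unregister() call in the mode's exit op.
 */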

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);


/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
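
/* Editorial sketch (not part of the driver): a mode module registers itself
 * with this core at module init, as the in-tree modes do. Names here are
 * illustrative:
 *
 *	static const struct team_mode my_mode = {
 *		.kind		= "mymode",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct my_mode_priv),
 *		.ops		= &my_mode_ops,
 *	};
 *
 *	static int __init my_mode_init_module(void)
 *	{
 *		return team_mode_register(&my_mode);
 *	}
 *	module_init(my_mode_init_module);
 *
 * team_mode_get() below can then autoload such a module through
 * request_module("team-mode-%s", kind), given a matching module alias.
 */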

static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	module_put(THIS_MODULE);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}


/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

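	/* rtnl_trylock() + reschedule rather than a blocking rtnl_lock():
	 * this work must not sleep on rtnl, since an rtnl holder may be
	 * waiting for it to finish (e.g. team_notify_peers_fini() ->
	 * cancel_delayed_work_sync() during uninit).
	 */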
	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

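	/* Same rtnl_trylock()/reschedule pattern as team_notify_peers_work()
	 * above, for the same deadlock-avoidance reason.
	 */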
	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}


/************************
 * Rx path frame handler
 ************************/

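/* Per-port rx_handler. The mode's receive op decides the frame's fate:
 * RX_HANDLER_ANOTHER hands the frame over to the team device (skb->dev is
 * rewritten and rx stats are bumped), RX_HANDLER_EXACT restricts delivery
 * to exact-match handlers on the port, and any other result from the mode
 * is accounted as rx_dropped here.
 */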
/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			/* link-local packets are mostly useful when the stack
			 * receives them with the link they arrive on.
			 */
			return RX_HANDLER_PASS;
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}


/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
				GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

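/* Queue id 0 is reserved for non-overridden traffic, so the override lists
 * are indexed by queue_id - 1 and only num_tx_queues - 1 of them exist.
 */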
static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}

static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}


/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding it to the enabled port hashlist and setting
 * port->index (this might be racy, so a reader could see an incorrect ifindex
 * while processing an in-flight packet, but that is not a problem). Writes
 * are guarded by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features  = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}
	rcu_read_unlock();

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				     NETIF_F_HW_VLAN_CTAG_TX |
				     NETIF_F_HW_VLAN_STAG_TX;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
	__team_compute_features(team);
	netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	__netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct team *team, struct team_port *port,
			       struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info, extack);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);

static int team_port_add(struct team *team, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (netif_is_team_port(port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	if (dev == port_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
		netdev_err(dev, "Cannot enslave team device to itself\n");
		return -EINVAL;
	}

	if (netdev_has_upper_dev(dev, port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev, extack);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
				portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port, extack);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	/* set promiscuity level to new slave */
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(port_dev, 1);
		if (err)
			goto err_set_slave_promisc;
	}

	/* set allmulti level to new slave */
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(port_dev, 1);
		if (err) {
			if (dev->flags & IFF_PROMISC)
				dev_set_promiscuity(port_dev, -1);
			goto err_set_slave_promisc;
		}
	}

	netif_addr_lock_bh(dev);
	dev_uc_sync_multiple(port_dev, dev);
	dev_mc_sync_multiple(port_dev, dev);
	netif_addr_unlock_bh(dev);

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

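/* The error path below unwinds the setup steps above in reverse order. */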
err_set_slave_promisc:
	__team_option_inst_del_port(team, port);

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(port_dev, -1);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(port_dev, -1);

	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}


/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};
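
/* These core options are exposed to userspace over the team generic netlink
 * family; with the libteam tools one would typically poke them with something
 * like "teamnl team0 setoption notify_peers_count 1" (illustrative usage,
 * the tooling lives outside this file).
 */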

static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	lockdep_register_key(&team->team_lock_key);
	__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
	netdev_lockdep_set_classes(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
	netdev_change_features(dev);
	lockdep_unregister_key(&team->team_lock_key);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue.  Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

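	/* Fold an out-of-range rx queue number into the valid tx queue
	 * range (a cheap substitute for a modulo operation).
	 */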
	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	dev_addr_set(dev, addr->sa_data);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by the team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
   1817	mutex_lock(&team->lock);
   1818	team->port_mtu_change_allowed = true;
   1819	list_for_each_entry(port, &team->port_list, list) {
   1820		err = dev_set_mtu(port->dev, new_mtu);
   1821		if (err) {
   1822			netdev_err(dev, "Device %s failed to change mtu",
   1823				   port->dev->name);
   1824			goto unwind;
   1825		}
   1826	}
   1827	team->port_mtu_change_allowed = false;
   1828	mutex_unlock(&team->lock);
   1829
   1830	dev->mtu = new_mtu;
   1831
   1832	return 0;
   1833
   1834unwind:
   1835	list_for_each_entry_continue_reverse(port, &team->port_list, list)
   1836		dev_set_mtu(port->dev, dev->mtu);
   1837	team->port_mtu_change_allowed = false;
   1838	mutex_unlock(&team->lock);
   1839
   1840	return err;
   1841}
   1842
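/* Fold the per-CPU counters into stats. The u64_stats_fetch_begin_irq()/
 * u64_stats_fetch_retry_irq() pair rereads a CPU's snapshot if an update
 * raced with the read; the u32 drop counters are updated without syncp
 * protection and are simply summed.
 */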
   1843static void
   1844team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
   1845{
   1846	struct team *team = netdev_priv(dev);
   1847	struct team_pcpu_stats *p;
   1848	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
   1849	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
   1850	unsigned int start;
   1851	int i;
   1852
   1853	for_each_possible_cpu(i) {
   1854		p = per_cpu_ptr(team->pcpu_stats, i);
   1855		do {
   1856			start = u64_stats_fetch_begin_irq(&p->syncp);
   1857			rx_packets	= p->rx_packets;
   1858			rx_bytes	= p->rx_bytes;
   1859			rx_multicast	= p->rx_multicast;
   1860			tx_packets	= p->tx_packets;
   1861			tx_bytes	= p->tx_bytes;
   1862		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
   1863
   1864		stats->rx_packets	+= rx_packets;
   1865		stats->rx_bytes		+= rx_bytes;
   1866		stats->multicast	+= rx_multicast;
   1867		stats->tx_packets	+= tx_packets;
   1868		stats->tx_bytes		+= tx_bytes;
   1869		/*
   1870		 * rx_dropped, tx_dropped & rx_nohandler are u32,
   1871		 * updated without syncp protection.
   1872		 */
   1873		rx_dropped	+= p->rx_dropped;
   1874		tx_dropped	+= p->tx_dropped;
   1875		rx_nohandler	+= p->rx_nohandler;
   1876	}
   1877	stats->rx_dropped	= rx_dropped;
   1878	stats->tx_dropped	= tx_dropped;
   1879	stats->rx_nohandler	= rx_nohandler;
   1880}
   1881
   1882static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
   1883{
   1884	struct team *team = netdev_priv(dev);
   1885	struct team_port *port;
   1886	int err;
   1887
   1888	/*
    1889	 * Although this is a reader, it's guarded by the team lock. It's not
    1890	 * possible to traverse the list in reverse under rcu_read_lock.
   1891	 */
   1892	mutex_lock(&team->lock);
   1893	list_for_each_entry(port, &team->port_list, list) {
   1894		err = vlan_vid_add(port->dev, proto, vid);
   1895		if (err)
   1896			goto unwind;
   1897	}
   1898	mutex_unlock(&team->lock);
   1899
   1900	return 0;
   1901
   1902unwind:
   1903	list_for_each_entry_continue_reverse(port, &team->port_list, list)
   1904		vlan_vid_del(port->dev, proto, vid);
   1905	mutex_unlock(&team->lock);
   1906
   1907	return err;
   1908}
   1909
   1910static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
   1911{
   1912	struct team *team = netdev_priv(dev);
   1913	struct team_port *port;
   1914
   1915	mutex_lock(&team->lock);
   1916	list_for_each_entry(port, &team->port_list, list)
   1917		vlan_vid_del(port->dev, proto, vid);
   1918	mutex_unlock(&team->lock);
   1919
   1920	return 0;
   1921}
   1922
   1923#ifdef CONFIG_NET_POLL_CONTROLLER
   1924static void team_poll_controller(struct net_device *dev)
   1925{
   1926}
   1927
   1928static void __team_netpoll_cleanup(struct team *team)
   1929{
   1930	struct team_port *port;
   1931
   1932	list_for_each_entry(port, &team->port_list, list)
   1933		team_port_disable_netpoll(port);
   1934}
   1935
   1936static void team_netpoll_cleanup(struct net_device *dev)
   1937{
   1938	struct team *team = netdev_priv(dev);
   1939
   1940	mutex_lock(&team->lock);
   1941	__team_netpoll_cleanup(team);
   1942	mutex_unlock(&team->lock);
   1943}
   1944
   1945static int team_netpoll_setup(struct net_device *dev,
    1946			      struct netpoll_info *npinfo)
   1947{
   1948	struct team *team = netdev_priv(dev);
   1949	struct team_port *port;
   1950	int err = 0;
   1951
   1952	mutex_lock(&team->lock);
   1953	list_for_each_entry(port, &team->port_list, list) {
   1954		err = __team_port_enable_netpoll(port);
   1955		if (err) {
   1956			__team_netpoll_cleanup(team);
   1957			break;
   1958		}
   1959	}
   1960	mutex_unlock(&team->lock);
   1961	return err;
   1962}
   1963#endif
   1964
   1965static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
   1966			  struct netlink_ext_ack *extack)
   1967{
   1968	struct team *team = netdev_priv(dev);
   1969	int err;
   1970
   1971	mutex_lock(&team->lock);
   1972	err = team_port_add(team, port_dev, extack);
   1973	mutex_unlock(&team->lock);
   1974
   1975	if (!err)
   1976		netdev_change_features(dev);
   1977
   1978	return err;
   1979}
   1980
   1981static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
   1982{
   1983	struct team *team = netdev_priv(dev);
   1984	int err;
   1985
   1986	mutex_lock(&team->lock);
   1987	err = team_port_del(team, port_dev);
   1988	mutex_unlock(&team->lock);
   1989
   1990	if (err)
   1991		return err;
   1992
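	/* The removed port was itself a team master: recycle this team's
	 * lockdep key, presumably so stale team-over-team nesting state
	 * doesn't trigger false-positive lockdep reports later.
	 */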
   1993	if (netif_is_team_master(port_dev)) {
   1994		lockdep_unregister_key(&team->team_lock_key);
   1995		lockdep_register_key(&team->team_lock_key);
   1996		lockdep_set_class(&team->lock, &team->team_lock_key);
   1997	}
   1998	netdev_change_features(dev);
   1999
   2000	return err;
   2001}
   2002
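/* Recompute the team's features from its ports. The seed clears the
 * NETIF_F_ONE_FOR_ALL bits and sets the NETIF_F_ALL_FOR_ALL bits so that
 * netdev_increment_features() can OR in the former (enabled if any port
 * has them) and AND in the latter (enabled only if every port has them).
 */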
   2003static netdev_features_t team_fix_features(struct net_device *dev,
   2004					   netdev_features_t features)
   2005{
   2006	struct team_port *port;
   2007	struct team *team = netdev_priv(dev);
   2008	netdev_features_t mask;
   2009
   2010	mask = features;
   2011	features &= ~NETIF_F_ONE_FOR_ALL;
   2012	features |= NETIF_F_ALL_FOR_ALL;
   2013
   2014	rcu_read_lock();
   2015	list_for_each_entry_rcu(port, &team->port_list, list) {
   2016		features = netdev_increment_features(features,
   2017						     port->dev->features,
   2018						     mask);
   2019	}
   2020	rcu_read_unlock();
   2021
   2022	features = netdev_add_tso_features(features, mask);
   2023
   2024	return features;
   2025}
   2026
   2027static int team_change_carrier(struct net_device *dev, bool new_carrier)
   2028{
   2029	struct team *team = netdev_priv(dev);
   2030
   2031	team->user_carrier_enabled = true;
   2032
   2033	if (new_carrier)
   2034		netif_carrier_on(dev);
   2035	else
   2036		netif_carrier_off(dev);
   2037	return 0;
   2038}
   2039
   2040static const struct net_device_ops team_netdev_ops = {
   2041	.ndo_init		= team_init,
   2042	.ndo_uninit		= team_uninit,
   2043	.ndo_open		= team_open,
   2044	.ndo_stop		= team_close,
   2045	.ndo_start_xmit		= team_xmit,
   2046	.ndo_select_queue	= team_select_queue,
   2047	.ndo_change_rx_flags	= team_change_rx_flags,
   2048	.ndo_set_rx_mode	= team_set_rx_mode,
   2049	.ndo_set_mac_address	= team_set_mac_address,
   2050	.ndo_change_mtu		= team_change_mtu,
   2051	.ndo_get_stats64	= team_get_stats64,
   2052	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
   2053	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
   2054#ifdef CONFIG_NET_POLL_CONTROLLER
   2055	.ndo_poll_controller	= team_poll_controller,
   2056	.ndo_netpoll_setup	= team_netpoll_setup,
   2057	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
   2058#endif
   2059	.ndo_add_slave		= team_add_slave,
   2060	.ndo_del_slave		= team_del_slave,
   2061	.ndo_fix_features	= team_fix_features,
   2062	.ndo_change_carrier     = team_change_carrier,
   2063	.ndo_features_check	= passthru_features_check,
   2064};
   2065
   2066/***********************
   2067 * ethtool interface
   2068 ***********************/
   2069
   2070static void team_ethtool_get_drvinfo(struct net_device *dev,
   2071				     struct ethtool_drvinfo *drvinfo)
   2072{
   2073	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
   2074	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
   2075}
   2076
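/* Report aggregate link settings: speed is the sum over all txable ports,
 * duplex is taken from the first txable port that reports a known value.
 */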
   2077static int team_ethtool_get_link_ksettings(struct net_device *dev,
   2078					   struct ethtool_link_ksettings *cmd)
   2079{
    2080	struct team *team = netdev_priv(dev);
   2081	unsigned long speed = 0;
   2082	struct team_port *port;
   2083
   2084	cmd->base.duplex = DUPLEX_UNKNOWN;
   2085	cmd->base.port = PORT_OTHER;
   2086
   2087	rcu_read_lock();
   2088	list_for_each_entry_rcu(port, &team->port_list, list) {
   2089		if (team_port_txable(port)) {
   2090			if (port->state.speed != SPEED_UNKNOWN)
   2091				speed += port->state.speed;
   2092			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
   2093			    port->state.duplex != DUPLEX_UNKNOWN)
   2094				cmd->base.duplex = port->state.duplex;
   2095		}
   2096	}
   2097	rcu_read_unlock();
   2098
   2099	cmd->base.speed = speed ? : SPEED_UNKNOWN;
   2100
   2101	return 0;
   2102}
   2103
   2104static const struct ethtool_ops team_ethtool_ops = {
   2105	.get_drvinfo		= team_ethtool_get_drvinfo,
   2106	.get_link		= ethtool_op_get_link,
   2107	.get_link_ksettings	= team_ethtool_get_link_ksettings,
   2108};
   2109
   2110/***********************
   2111 * rt netlink interface
   2112 ***********************/
   2113
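/* Adopt the link-layer identity of the first enslaved port; used by
 * team_dev_type_check_change() when a port of a different ARP hardware
 * type is added to an empty team.
 */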
   2114static void team_setup_by_port(struct net_device *dev,
   2115			       struct net_device *port_dev)
   2116{
   2117	dev->header_ops	= port_dev->header_ops;
   2118	dev->type = port_dev->type;
   2119	dev->hard_header_len = port_dev->hard_header_len;
   2120	dev->needed_headroom = port_dev->needed_headroom;
   2121	dev->addr_len = port_dev->addr_len;
   2122	dev->mtu = port_dev->mtu;
   2123	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
   2124	eth_hw_addr_inherit(dev, port_dev);
   2125}
   2126
   2127static int team_dev_type_check_change(struct net_device *dev,
   2128				      struct net_device *port_dev)
   2129{
   2130	struct team *team = netdev_priv(dev);
   2131	char *portname = port_dev->name;
   2132	int err;
   2133
   2134	if (dev->type == port_dev->type)
   2135		return 0;
   2136	if (!list_empty(&team->port_list)) {
   2137		netdev_err(dev, "Device %s is of different type\n", portname);
   2138		return -EBUSY;
   2139	}
   2140	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
   2141	err = notifier_to_errno(err);
   2142	if (err) {
   2143		netdev_err(dev, "Refused to change device type\n");
   2144		return err;
   2145	}
   2146	dev_uc_flush(dev);
   2147	dev_mc_flush(dev);
   2148	team_setup_by_port(dev, port_dev);
   2149	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
   2150	return 0;
   2151}
   2152
   2153static void team_setup(struct net_device *dev)
   2154{
   2155	ether_setup(dev);
   2156	dev->max_mtu = ETH_MAX_MTU;
   2157
   2158	dev->netdev_ops = &team_netdev_ops;
   2159	dev->ethtool_ops = &team_ethtool_ops;
   2160	dev->needs_free_netdev = true;
   2161	dev->priv_destructor = team_destructor;
   2162	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
   2163	dev->priv_flags |= IFF_NO_QUEUE;
   2164	dev->priv_flags |= IFF_TEAM;
   2165
   2166	/*
    2167	 * Indicate that we support unicast address filtering. That way the core
    2168	 * won't put us into promiscuous mode when a unicast address is added;
    2169	 * that decision is left up to the underlying drivers.
   2170	 */
   2171	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
   2172
   2173	dev->features |= NETIF_F_LLTX;
   2174	dev->features |= NETIF_F_GRO;
   2175
   2176	/* Don't allow team devices to change network namespaces. */
   2177	dev->features |= NETIF_F_NETNS_LOCAL;
   2178
   2179	dev->hw_features = TEAM_VLAN_FEATURES |
   2180			   NETIF_F_HW_VLAN_CTAG_RX |
   2181			   NETIF_F_HW_VLAN_CTAG_FILTER;
   2182
   2183	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
   2184	dev->features |= dev->hw_features;
   2185	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
   2186}
   2187
   2188static int team_newlink(struct net *src_net, struct net_device *dev,
   2189			struct nlattr *tb[], struct nlattr *data[],
   2190			struct netlink_ext_ack *extack)
   2191{
   2192	if (tb[IFLA_ADDRESS] == NULL)
   2193		eth_hw_addr_random(dev);
   2194
   2195	return register_netdevice(dev);
   2196}
   2197
   2198static int team_validate(struct nlattr *tb[], struct nlattr *data[],
   2199			 struct netlink_ext_ack *extack)
   2200{
   2201	if (tb[IFLA_ADDRESS]) {
   2202		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
   2203			return -EINVAL;
   2204		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
   2205			return -EADDRNOTAVAIL;
   2206	}
   2207	return 0;
   2208}
   2209
   2210static unsigned int team_get_num_tx_queues(void)
   2211{
   2212	return TEAM_DEFAULT_NUM_TX_QUEUES;
   2213}
   2214
   2215static unsigned int team_get_num_rx_queues(void)
   2216{
   2217	return TEAM_DEFAULT_NUM_RX_QUEUES;
   2218}
   2219
   2220static struct rtnl_link_ops team_link_ops __read_mostly = {
   2221	.kind			= DRV_NAME,
   2222	.priv_size		= sizeof(struct team),
   2223	.setup			= team_setup,
   2224	.newlink		= team_newlink,
   2225	.validate		= team_validate,
   2226	.get_num_tx_queues	= team_get_num_tx_queues,
   2227	.get_num_rx_queues	= team_get_num_rx_queues,
   2228};
   2229
   2230
   2231/***********************************
   2232 * Generic netlink custom interface
   2233 ***********************************/
   2234
   2235static struct genl_family team_nl_family;
   2236
   2237static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
   2238	[TEAM_ATTR_UNSPEC]			= { .type = NLA_UNSPEC, },
   2239	[TEAM_ATTR_TEAM_IFINDEX]		= { .type = NLA_U32 },
   2240	[TEAM_ATTR_LIST_OPTION]			= { .type = NLA_NESTED },
   2241	[TEAM_ATTR_LIST_PORT]			= { .type = NLA_NESTED },
   2242};
   2243
   2244static const struct nla_policy
   2245team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
   2246	[TEAM_ATTR_OPTION_UNSPEC]		= { .type = NLA_UNSPEC, },
   2247	[TEAM_ATTR_OPTION_NAME] = {
   2248		.type = NLA_STRING,
   2249		.len = TEAM_STRING_MAX_LEN,
   2250	},
   2251	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
   2252	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
   2253	[TEAM_ATTR_OPTION_DATA]			= { .type = NLA_BINARY },
   2254	[TEAM_ATTR_OPTION_PORT_IFINDEX]		= { .type = NLA_U32 },
   2255	[TEAM_ATTR_OPTION_ARRAY_INDEX]		= { .type = NLA_U32 },
   2256};
   2257
   2258static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
   2259{
   2260	struct sk_buff *msg;
   2261	void *hdr;
   2262	int err;
   2263
   2264	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
   2265	if (!msg)
   2266		return -ENOMEM;
   2267
   2268	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
   2269			  &team_nl_family, 0, TEAM_CMD_NOOP);
   2270	if (!hdr) {
   2271		err = -EMSGSIZE;
   2272		goto err_msg_put;
   2273	}
   2274
   2275	genlmsg_end(msg, hdr);
   2276
   2277	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
   2278
   2279err_msg_put:
   2280	nlmsg_free(msg);
   2281
   2282	return err;
   2283}
   2284
   2285/*
    2286	 * Netlink cmd functions should be bracketed by the following two functions.
    2287	 * Since dev is held here, it is guaranteed not to disappear in between.
   2288 */
   2289static struct team *team_nl_team_get(struct genl_info *info)
   2290{
   2291	struct net *net = genl_info_net(info);
   2292	int ifindex;
   2293	struct net_device *dev;
   2294	struct team *team;
   2295
   2296	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
   2297		return NULL;
   2298
   2299	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
   2300	dev = dev_get_by_index(net, ifindex);
   2301	if (!dev || dev->netdev_ops != &team_netdev_ops) {
   2302		if (dev)
   2303			dev_put(dev);
   2304		return NULL;
   2305	}
   2306
   2307	team = netdev_priv(dev);
   2308	mutex_lock(&team->lock);
   2309	return team;
   2310}
   2311
   2312static void team_nl_team_put(struct team *team)
   2313{
   2314	mutex_unlock(&team->lock);
   2315	dev_put(team->dev);
   2316}
   2317
   2318typedef int team_nl_send_func_t(struct sk_buff *skb,
   2319				struct team *team, u32 portid);
   2320
   2321static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
   2322{
   2323	return genlmsg_unicast(dev_net(team->dev), skb, portid);
   2324}
   2325
   2326static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
   2327				       struct team_option_inst *opt_inst)
   2328{
   2329	struct nlattr *option_item;
   2330	struct team_option *option = opt_inst->option;
   2331	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
   2332	struct team_gsetter_ctx ctx;
   2333	int err;
   2334
   2335	ctx.info = opt_inst_info;
   2336	err = team_option_get(team, opt_inst, &ctx);
   2337	if (err)
   2338		return err;
   2339
   2340	option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
   2341	if (!option_item)
   2342		return -EMSGSIZE;
   2343
   2344	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
   2345		goto nest_cancel;
   2346	if (opt_inst_info->port &&
   2347	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
   2348			opt_inst_info->port->dev->ifindex))
   2349		goto nest_cancel;
   2350	if (opt_inst->option->array_size &&
   2351	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
   2352			opt_inst_info->array_index))
   2353		goto nest_cancel;
   2354
   2355	switch (option->type) {
   2356	case TEAM_OPTION_TYPE_U32:
   2357		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
   2358			goto nest_cancel;
   2359		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
   2360			goto nest_cancel;
   2361		break;
   2362	case TEAM_OPTION_TYPE_STRING:
   2363		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
   2364			goto nest_cancel;
   2365		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
   2366				   ctx.data.str_val))
   2367			goto nest_cancel;
   2368		break;
   2369	case TEAM_OPTION_TYPE_BINARY:
   2370		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
   2371			goto nest_cancel;
   2372		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
   2373			    ctx.data.bin_val.ptr))
   2374			goto nest_cancel;
   2375		break;
   2376	case TEAM_OPTION_TYPE_BOOL:
   2377		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
   2378			goto nest_cancel;
   2379		if (ctx.data.bool_val &&
   2380		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
   2381			goto nest_cancel;
   2382		break;
   2383	case TEAM_OPTION_TYPE_S32:
   2384		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
   2385			goto nest_cancel;
   2386		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
   2387			goto nest_cancel;
   2388		break;
   2389	default:
   2390		BUG();
   2391	}
   2392	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
   2393		goto nest_cancel;
   2394	if (opt_inst->changed) {
   2395		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
   2396			goto nest_cancel;
   2397		opt_inst->changed = false;
   2398	}
   2399	nla_nest_end(skb, option_item);
   2400	return 0;
   2401
   2402nest_cancel:
   2403	nla_nest_cancel(skb, option_item);
   2404	return -EMSGSIZE;
   2405}
   2406
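/* If a partially filled skb is pending, flush it through send_func and
 * allocate a fresh one. This lets the multipart senders below treat
 * "send what we have so far and start a new message" as a single step.
 */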
   2407static int __send_and_alloc_skb(struct sk_buff **pskb,
   2408				struct team *team, u32 portid,
   2409				team_nl_send_func_t *send_func)
   2410{
   2411	int err;
   2412
   2413	if (*pskb) {
   2414		err = send_func(*pskb, team, portid);
   2415		if (err)
   2416			return err;
   2417	}
   2418	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
   2419	if (!*pskb)
   2420		return -ENOMEM;
   2421	return 0;
   2422}
   2423
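/* Emit the options in sel_opt_inst_list as a multipart netlink stream:
 * whenever an option no longer fits (-EMSGSIZE after at least one fit),
 * the current message is closed and sent and filling restarts, and the
 * stream is terminated by an NLMSG_DONE message.
 */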
   2424static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
   2425				    int flags, team_nl_send_func_t *send_func,
   2426				    struct list_head *sel_opt_inst_list)
   2427{
   2428	struct nlattr *option_list;
   2429	struct nlmsghdr *nlh;
   2430	void *hdr;
   2431	struct team_option_inst *opt_inst;
   2432	int err;
   2433	struct sk_buff *skb = NULL;
   2434	bool incomplete;
   2435	int i;
   2436
   2437	opt_inst = list_first_entry(sel_opt_inst_list,
   2438				    struct team_option_inst, tmp_list);
   2439
   2440start_again:
   2441	err = __send_and_alloc_skb(&skb, team, portid, send_func);
   2442	if (err)
   2443		return err;
   2444
   2445	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
   2446			  TEAM_CMD_OPTIONS_GET);
   2447	if (!hdr) {
   2448		nlmsg_free(skb);
   2449		return -EMSGSIZE;
   2450	}
   2451
   2452	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
   2453		goto nla_put_failure;
   2454	option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
   2455	if (!option_list)
   2456		goto nla_put_failure;
   2457
   2458	i = 0;
   2459	incomplete = false;
   2460	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
   2461		err = team_nl_fill_one_option_get(skb, team, opt_inst);
   2462		if (err) {
   2463			if (err == -EMSGSIZE) {
   2464				if (!i)
   2465					goto errout;
   2466				incomplete = true;
   2467				break;
   2468			}
   2469			goto errout;
   2470		}
   2471		i++;
   2472	}
   2473
   2474	nla_nest_end(skb, option_list);
   2475	genlmsg_end(skb, hdr);
   2476	if (incomplete)
   2477		goto start_again;
   2478
   2479send_done:
   2480	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
   2481	if (!nlh) {
   2482		err = __send_and_alloc_skb(&skb, team, portid, send_func);
   2483		if (err)
   2484			return err;
   2485		goto send_done;
   2486	}
   2487
   2488	return send_func(skb, team, portid);
   2489
   2490nla_put_failure:
   2491	err = -EMSGSIZE;
   2492errout:
   2493	nlmsg_free(skb);
   2494	return err;
   2495}
   2496
   2497static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
   2498{
   2499	struct team *team;
   2500	struct team_option_inst *opt_inst;
   2501	int err;
   2502	LIST_HEAD(sel_opt_inst_list);
   2503
   2504	team = team_nl_team_get(info);
   2505	if (!team)
   2506		return -EINVAL;
   2507
   2508	list_for_each_entry(opt_inst, &team->option_inst_list, list)
   2509		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
   2510	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
   2511				       NLM_F_ACK, team_nl_send_unicast,
   2512				       &sel_opt_inst_list);
   2513
   2514	team_nl_team_put(team);
   2515
   2516	return err;
   2517}
   2518
   2519static int team_nl_send_event_options_get(struct team *team,
   2520					  struct list_head *sel_opt_inst_list);
   2521
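/* Apply userspace option updates: each TEAM_ATTR_ITEM_OPTION nest is parsed
 * against team_nl_option_policy, matched to an option instance by name,
 * type, port ifindex and array index, applied via team_option_set(), and
 * the resulting changes are multicast back as an options-get event.
 */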
   2522static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
   2523{
   2524	struct team *team;
   2525	int err = 0;
   2526	int i;
   2527	struct nlattr *nl_option;
   2528
   2529	rtnl_lock();
   2530
   2531	team = team_nl_team_get(info);
   2532	if (!team) {
   2533		err = -EINVAL;
   2534		goto rtnl_unlock;
   2535	}
   2536
   2537	err = -EINVAL;
    2538	if (!info->attrs[TEAM_ATTR_LIST_OPTION])
    2539		goto team_put;
   2542
   2543	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
   2544		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
   2545		struct nlattr *attr;
   2546		struct nlattr *attr_data;
   2547		LIST_HEAD(opt_inst_list);
   2548		enum team_option_type opt_type;
   2549		int opt_port_ifindex = 0; /* != 0 for per-port options */
   2550		u32 opt_array_index = 0;
   2551		bool opt_is_array = false;
   2552		struct team_option_inst *opt_inst;
   2553		char *opt_name;
   2554		bool opt_found = false;
   2555
   2556		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
   2557			err = -EINVAL;
   2558			goto team_put;
   2559		}
   2560		err = nla_parse_nested_deprecated(opt_attrs,
   2561						  TEAM_ATTR_OPTION_MAX,
   2562						  nl_option,
   2563						  team_nl_option_policy,
   2564						  info->extack);
   2565		if (err)
   2566			goto team_put;
   2567		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
   2568		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
   2569			err = -EINVAL;
   2570			goto team_put;
   2571		}
   2572		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
   2573		case NLA_U32:
   2574			opt_type = TEAM_OPTION_TYPE_U32;
   2575			break;
   2576		case NLA_STRING:
   2577			opt_type = TEAM_OPTION_TYPE_STRING;
   2578			break;
   2579		case NLA_BINARY:
   2580			opt_type = TEAM_OPTION_TYPE_BINARY;
   2581			break;
   2582		case NLA_FLAG:
   2583			opt_type = TEAM_OPTION_TYPE_BOOL;
   2584			break;
   2585		case NLA_S32:
   2586			opt_type = TEAM_OPTION_TYPE_S32;
   2587			break;
    2588		default:
			err = -EINVAL;
    2589			goto team_put;
   2590		}
   2591
   2592		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
   2593		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
   2594			err = -EINVAL;
   2595			goto team_put;
   2596		}
   2597
   2598		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
   2599		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
   2600		if (attr)
   2601			opt_port_ifindex = nla_get_u32(attr);
   2602
   2603		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
   2604		if (attr) {
   2605			opt_is_array = true;
   2606			opt_array_index = nla_get_u32(attr);
   2607		}
   2608
   2609		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
   2610			struct team_option *option = opt_inst->option;
   2611			struct team_gsetter_ctx ctx;
   2612			struct team_option_inst_info *opt_inst_info;
   2613			int tmp_ifindex;
   2614
   2615			opt_inst_info = &opt_inst->info;
   2616			tmp_ifindex = opt_inst_info->port ?
   2617				      opt_inst_info->port->dev->ifindex : 0;
   2618			if (option->type != opt_type ||
   2619			    strcmp(option->name, opt_name) ||
   2620			    tmp_ifindex != opt_port_ifindex ||
   2621			    (option->array_size && !opt_is_array) ||
   2622			    opt_inst_info->array_index != opt_array_index)
   2623				continue;
   2624			opt_found = true;
   2625			ctx.info = opt_inst_info;
   2626			switch (opt_type) {
   2627			case TEAM_OPTION_TYPE_U32:
   2628				ctx.data.u32_val = nla_get_u32(attr_data);
   2629				break;
   2630			case TEAM_OPTION_TYPE_STRING:
   2631				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
   2632					err = -EINVAL;
   2633					goto team_put;
   2634				}
   2635				ctx.data.str_val = nla_data(attr_data);
   2636				break;
   2637			case TEAM_OPTION_TYPE_BINARY:
   2638				ctx.data.bin_val.len = nla_len(attr_data);
   2639				ctx.data.bin_val.ptr = nla_data(attr_data);
   2640				break;
   2641			case TEAM_OPTION_TYPE_BOOL:
   2642				ctx.data.bool_val = attr_data ? true : false;
   2643				break;
   2644			case TEAM_OPTION_TYPE_S32:
   2645				ctx.data.s32_val = nla_get_s32(attr_data);
   2646				break;
   2647			default:
   2648				BUG();
   2649			}
   2650			err = team_option_set(team, opt_inst, &ctx);
   2651			if (err)
   2652				goto team_put;
   2653			opt_inst->changed = true;
   2654			list_add(&opt_inst->tmp_list, &opt_inst_list);
   2655		}
   2656		if (!opt_found) {
   2657			err = -ENOENT;
   2658			goto team_put;
   2659		}
   2660
   2661		err = team_nl_send_event_options_get(team, &opt_inst_list);
   2662		if (err)
   2663			break;
   2664	}
   2665
   2666team_put:
   2667	team_nl_team_put(team);
   2668rtnl_unlock:
   2669	rtnl_unlock();
   2670	return err;
   2671}
   2672
   2673static int team_nl_fill_one_port_get(struct sk_buff *skb,
   2674				     struct team_port *port)
   2675{
   2676	struct nlattr *port_item;
   2677
   2678	port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
   2679	if (!port_item)
   2680		goto nest_cancel;
   2681	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
   2682		goto nest_cancel;
   2683	if (port->changed) {
   2684		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
   2685			goto nest_cancel;
   2686		port->changed = false;
   2687	}
   2688	if ((port->removed &&
   2689	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
   2690	    (port->state.linkup &&
   2691	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
   2692	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
   2693	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
   2694		goto nest_cancel;
   2695	nla_nest_end(skb, port_item);
   2696	return 0;
   2697
   2698nest_cancel:
   2699	nla_nest_cancel(skb, port_item);
   2700	return -EMSGSIZE;
   2701}
   2702
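/* Port-list counterpart of team_nl_send_options_get(): sends either the
 * single port in one_port or the whole port list as a multipart stream.
 */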
   2703static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
   2704				      int flags, team_nl_send_func_t *send_func,
   2705				      struct team_port *one_port)
   2706{
   2707	struct nlattr *port_list;
   2708	struct nlmsghdr *nlh;
   2709	void *hdr;
   2710	struct team_port *port;
   2711	int err;
   2712	struct sk_buff *skb = NULL;
   2713	bool incomplete;
   2714	int i;
   2715
   2716	port = list_first_entry_or_null(&team->port_list,
   2717					struct team_port, list);
   2718
   2719start_again:
   2720	err = __send_and_alloc_skb(&skb, team, portid, send_func);
   2721	if (err)
   2722		return err;
   2723
   2724	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
   2725			  TEAM_CMD_PORT_LIST_GET);
   2726	if (!hdr) {
   2727		nlmsg_free(skb);
   2728		return -EMSGSIZE;
   2729	}
   2730
   2731	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
   2732		goto nla_put_failure;
   2733	port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
   2734	if (!port_list)
   2735		goto nla_put_failure;
   2736
   2737	i = 0;
   2738	incomplete = false;
   2739
    2740	/* If one port is selected, the caller wants a port list containing
    2741	 * only this port. Otherwise go through all listed ports and send them all.
   2742	 */
   2743	if (one_port) {
   2744		err = team_nl_fill_one_port_get(skb, one_port);
   2745		if (err)
   2746			goto errout;
   2747	} else if (port) {
   2748		list_for_each_entry_from(port, &team->port_list, list) {
   2749			err = team_nl_fill_one_port_get(skb, port);
   2750			if (err) {
   2751				if (err == -EMSGSIZE) {
   2752					if (!i)
   2753						goto errout;
   2754					incomplete = true;
   2755					break;
   2756				}
   2757				goto errout;
   2758			}
   2759			i++;
   2760		}
   2761	}
   2762
   2763	nla_nest_end(skb, port_list);
   2764	genlmsg_end(skb, hdr);
   2765	if (incomplete)
   2766		goto start_again;
   2767
   2768send_done:
   2769	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
   2770	if (!nlh) {
   2771		err = __send_and_alloc_skb(&skb, team, portid, send_func);
   2772		if (err)
   2773			return err;
   2774		goto send_done;
   2775	}
   2776
   2777	return send_func(skb, team, portid);
   2778
   2779nla_put_failure:
   2780	err = -EMSGSIZE;
   2781errout:
   2782	nlmsg_free(skb);
   2783	return err;
   2784}
   2785
   2786static int team_nl_cmd_port_list_get(struct sk_buff *skb,
   2787				     struct genl_info *info)
   2788{
   2789	struct team *team;
   2790	int err;
   2791
   2792	team = team_nl_team_get(info);
   2793	if (!team)
   2794		return -EINVAL;
   2795
   2796	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
   2797					 NLM_F_ACK, team_nl_send_unicast, NULL);
   2798
   2799	team_nl_team_put(team);
   2800
   2801	return err;
   2802}
   2803
   2804static const struct genl_small_ops team_nl_ops[] = {
   2805	{
   2806		.cmd = TEAM_CMD_NOOP,
   2807		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
   2808		.doit = team_nl_cmd_noop,
   2809	},
   2810	{
   2811		.cmd = TEAM_CMD_OPTIONS_SET,
   2812		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
   2813		.doit = team_nl_cmd_options_set,
   2814		.flags = GENL_ADMIN_PERM,
   2815	},
   2816	{
   2817		.cmd = TEAM_CMD_OPTIONS_GET,
   2818		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
   2819		.doit = team_nl_cmd_options_get,
   2820		.flags = GENL_ADMIN_PERM,
   2821	},
   2822	{
   2823		.cmd = TEAM_CMD_PORT_LIST_GET,
   2824		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
   2825		.doit = team_nl_cmd_port_list_get,
   2826		.flags = GENL_ADMIN_PERM,
   2827	},
   2828};
   2829
   2830static const struct genl_multicast_group team_nl_mcgrps[] = {
   2831	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
   2832};
   2833
   2834static struct genl_family team_nl_family __ro_after_init = {
   2835	.name		= TEAM_GENL_NAME,
   2836	.version	= TEAM_GENL_VERSION,
   2837	.maxattr	= TEAM_ATTR_MAX,
    2838		.policy		= team_nl_policy,
   2839	.netnsok	= true,
   2840	.module		= THIS_MODULE,
   2841	.small_ops	= team_nl_ops,
   2842	.n_small_ops	= ARRAY_SIZE(team_nl_ops),
   2843	.mcgrps		= team_nl_mcgrps,
   2844	.n_mcgrps	= ARRAY_SIZE(team_nl_mcgrps),
   2845};
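
/* Sketch of the userspace side (typically libteam/teamd; the calls below
 * are illustrative, not taken from this file): resolve the family by name,
 * then issue admin-only requests keyed by the team's ifindex, e.g.
 *
 *	id = genl_ctrl_resolve(sock, TEAM_GENL_NAME);
 *	TEAM_CMD_OPTIONS_GET { TEAM_ATTR_TEAM_IFINDEX = ifindex }
 *		-> multipart reply of TEAM_ATTR_LIST_OPTION items
 *
 * Change events are multicast on TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME.
 */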
   2846
   2847static int team_nl_send_multicast(struct sk_buff *skb,
   2848				  struct team *team, u32 portid)
   2849{
   2850	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
   2851				       skb, 0, 0, GFP_KERNEL);
   2852}
   2853
   2854static int team_nl_send_event_options_get(struct team *team,
   2855					  struct list_head *sel_opt_inst_list)
   2856{
   2857	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
   2858					sel_opt_inst_list);
   2859}
   2860
   2861static int team_nl_send_event_port_get(struct team *team,
   2862				       struct team_port *port)
   2863{
   2864	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
   2865					  port);
   2866}
   2867
   2868static int __init team_nl_init(void)
   2869{
   2870	return genl_register_family(&team_nl_family);
   2871}
   2872
   2873static void team_nl_fini(void)
   2874{
   2875	genl_unregister_family(&team_nl_family);
   2876}
   2877
   2878
   2879/******************
   2880 * Change checkers
   2881 ******************/
   2882
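/* Collect every changed option instance and multicast one options-get
 * event; -ESRCH from the multicast just means nobody is listening.
 */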
   2883static void __team_options_change_check(struct team *team)
   2884{
   2885	int err;
   2886	struct team_option_inst *opt_inst;
   2887	LIST_HEAD(sel_opt_inst_list);
   2888
   2889	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
   2890		if (opt_inst->changed)
   2891			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
   2892	}
   2893	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
   2894	if (err && err != -ESRCH)
   2895		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
   2896			    err);
   2897}
   2898
   2899/* rtnl lock is held */
   2900
   2901static void __team_port_change_send(struct team_port *port, bool linkup)
   2902{
   2903	int err;
   2904
   2905	port->changed = true;
   2906	port->state.linkup = linkup;
   2907	team_refresh_port_linkup(port);
   2908	if (linkup) {
   2909		struct ethtool_link_ksettings ecmd;
   2910
   2911		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
   2912		if (!err) {
   2913			port->state.speed = ecmd.base.speed;
   2914			port->state.duplex = ecmd.base.duplex;
   2915			goto send_event;
   2916		}
   2917	}
   2918	port->state.speed = 0;
   2919	port->state.duplex = 0;
   2920
   2921send_event:
   2922	err = team_nl_send_event_port_get(port->team, port);
   2923	if (err && err != -ESRCH)
   2924		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
   2925			    port->dev->name, err);
   2927}
   2928
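/* The team's carrier follows its ports: on iff at least one port has
 * linkup. Skipped once userspace has taken control of the carrier via
 * ndo_change_carrier (user_carrier_enabled).
 */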
   2929static void __team_carrier_check(struct team *team)
   2930{
   2931	struct team_port *port;
   2932	bool team_linkup;
   2933
   2934	if (team->user_carrier_enabled)
   2935		return;
   2936
   2937	team_linkup = false;
   2938	list_for_each_entry(port, &team->port_list, list) {
   2939		if (port->linkup) {
   2940			team_linkup = true;
   2941			break;
   2942		}
   2943	}
   2944
   2945	if (team_linkup)
   2946		netif_carrier_on(team->dev);
   2947	else
   2948		netif_carrier_off(team->dev);
   2949}
   2950
   2951static void __team_port_change_check(struct team_port *port, bool linkup)
   2952{
   2953	if (port->state.linkup != linkup)
   2954		__team_port_change_send(port, linkup);
   2955	__team_carrier_check(port->team);
   2956}
   2957
   2958static void __team_port_change_port_added(struct team_port *port, bool linkup)
   2959{
   2960	__team_port_change_send(port, linkup);
   2961	__team_carrier_check(port->team);
   2962}
   2963
   2964static void __team_port_change_port_removed(struct team_port *port)
   2965{
   2966	port->removed = true;
   2967	__team_port_change_send(port, false);
   2968	__team_carrier_check(port->team);
   2969}
   2970
   2971static void team_port_change_check(struct team_port *port, bool linkup)
   2972{
   2973	struct team *team = port->team;
   2974
   2975	mutex_lock(&team->lock);
   2976	__team_port_change_check(port, linkup);
   2977	mutex_unlock(&team->lock);
   2978}
   2979
   2980
   2981/************************************
   2982 * Net device notifier event handler
   2983 ************************************/
   2984
   2985static int team_device_event(struct notifier_block *unused,
   2986			     unsigned long event, void *ptr)
   2987{
   2988	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
   2989	struct team_port *port;
   2990
   2991	port = team_port_get_rtnl(dev);
   2992	if (!port)
   2993		return NOTIFY_DONE;
   2994
   2995	switch (event) {
   2996	case NETDEV_UP:
   2997		if (netif_oper_up(dev))
   2998			team_port_change_check(port, true);
   2999		break;
   3000	case NETDEV_DOWN:
   3001		team_port_change_check(port, false);
   3002		break;
   3003	case NETDEV_CHANGE:
   3004		if (netif_running(port->dev))
   3005			team_port_change_check(port,
   3006					       !!netif_oper_up(port->dev));
   3007		break;
   3008	case NETDEV_UNREGISTER:
   3009		team_del_slave(port->team->dev, dev);
   3010		break;
   3011	case NETDEV_FEAT_CHANGE:
   3012		team_compute_features(port->team);
   3013		break;
   3014	case NETDEV_PRECHANGEMTU:
    3015		/* Forbid changing the MTU of the underlying device */
   3016		if (!port->team->port_mtu_change_allowed)
   3017			return NOTIFY_BAD;
   3018		break;
   3019	case NETDEV_PRE_TYPE_CHANGE:
    3020		/* Forbid changing the type of the underlying device */
   3021		return NOTIFY_BAD;
   3022	case NETDEV_RESEND_IGMP:
   3023		/* Propagate to master device */
   3024		call_netdevice_notifiers(event, port->team->dev);
   3025		break;
   3026	}
   3027	return NOTIFY_DONE;
   3028}
   3029
   3030static struct notifier_block team_notifier_block __read_mostly = {
   3031	.notifier_call = team_device_event,
   3032};
   3033
   3034
   3035/***********************
   3036 * Module init and exit
   3037 ***********************/
   3038
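/* Register the netdevice notifier first so port events are observed as
 * soon as team links can be created; error paths unwind in reverse order.
 */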
   3039static int __init team_module_init(void)
   3040{
   3041	int err;
   3042
   3043	register_netdevice_notifier(&team_notifier_block);
   3044
   3045	err = rtnl_link_register(&team_link_ops);
   3046	if (err)
   3047		goto err_rtnl_reg;
   3048
   3049	err = team_nl_init();
   3050	if (err)
   3051		goto err_nl_init;
   3052
   3053	return 0;
   3054
   3055err_nl_init:
   3056	rtnl_link_unregister(&team_link_ops);
   3057
   3058err_rtnl_reg:
   3059	unregister_netdevice_notifier(&team_notifier_block);
   3060
   3061	return err;
   3062}
   3063
   3064static void __exit team_module_exit(void)
   3065{
   3066	team_nl_fini();
   3067	rtnl_link_unregister(&team_link_ops);
   3068	unregister_netdevice_notifier(&team_notifier_block);
   3069}
   3070
   3071module_init(team_module_init);
   3072module_exit(team_module_exit);
   3073
   3074MODULE_LICENSE("GPL v2");
   3075MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
   3076MODULE_DESCRIPTION("Ethernet team device driver");
   3077MODULE_ALIAS_RTNL_LINK(DRV_NAME);