cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

team_mode_loadbalance.c (18150B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
 * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/if_team.h>

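/*
 * RX path: slow-protocol (ETH_P_SLOW) frames addressed to the LACP
 * multicast address 01:80:C2:00:00:02 are handed back for exact delivery
 * so LACP keeps working; all other traffic continues through the team
 * device.
 */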
static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
		/* LACPDU packets should go to exact delivery */
		const unsigned char *dest = eth_hdr(skb)->h_dest;

		if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
			return RX_HANDLER_EXACT;
	}
	return RX_HANDLER_ANOTHER;
}

struct lb_priv;

typedef struct team_port *lb_select_tx_port_func_t(struct team *,
						   struct lb_priv *,
						   struct sk_buff *,
						   unsigned char);

#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */

struct lb_stats {
	u64 tx_bytes;
};

struct lb_pcpu_stats {
	struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
	struct u64_stats_sync syncp;
};

struct lb_stats_info {
	struct lb_stats stats;
	struct lb_stats last_stats;
	struct team_option_inst_info *opt_inst_info;
};

struct lb_port_mapping {
	struct team_port __rcu *port;
	struct team_option_inst_info *opt_inst_info;
};

struct lb_priv_ex {
	struct team *team;
	struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
	struct sock_fprog_kern *orig_fprog;
	struct {
		unsigned int refresh_interval; /* in tenths of second */
		struct delayed_work refresh_dw;
		struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
	} stats;
};

struct lb_priv {
	struct bpf_prog __rcu *fp;
	lb_select_tx_port_func_t __rcu *select_tx_port_func;
	struct lb_pcpu_stats __percpu *pcpu_stats;
	struct lb_priv_ex *ex; /* priv extension */
};

static struct lb_priv *get_lb_priv(struct team *team)
{
	return (struct lb_priv *) &team->mode_priv;
}

struct lb_port_priv {
	struct lb_stats __percpu *pcpu_stats;
	struct lb_stats_info stats_info;
};

static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
{
	return (struct lb_port_priv *) &port->mode_priv;
}

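/*
 * Accessors for the per-hash TX port mapping table used by the
 * "hash_to_port_mapping" selector; entries are written via the
 * "lb_tx_hash_to_port_mapping" option and read under RCU on transmit.
 */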
#define LB_HTPM_PORT_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[hash].port

#define LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info

static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
						 struct team_port *port)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	bool changed = false;
	int i;

	for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
		struct lb_port_mapping *pm;

		pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
		if (rcu_access_pointer(pm->port) == port) {
			RCU_INIT_POINTER(pm->port, NULL);
			team_option_inst_set_change(pm->opt_inst_info);
			changed = true;
		}
	}
	if (changed)
		team_options_change_check(team);
}

/* Basic tx selection based solely by hash */
static struct team_port *lb_hash_select_tx_port(struct team *team,
						struct lb_priv *lb_priv,
						struct sk_buff *skb,
						unsigned char hash)
{
	int port_index = team_num_to_port_index(team, hash);

	return team_get_port_by_index_rcu(team, port_index);
}

/* Hash to port mapping select tx port */
static struct team_port *lb_htpm_select_tx_port(struct team *team,
						struct lb_priv *lb_priv,
						struct sk_buff *skb,
						unsigned char hash)
{
	struct team_port *port;

	port = rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
	if (likely(port))
		return port;
	/* If no valid port in the table, fall back to simple hash */
	return lb_hash_select_tx_port(team, lb_priv, skb, hash);
}

struct lb_select_tx_port {
	char *name;
	lb_select_tx_port_func_t *func;
};

static const struct lb_select_tx_port lb_select_tx_port_list[] = {
	{
		.name = "hash",
		.func = lb_hash_select_tx_port,
	},
	{
		.name = "hash_to_port_mapping",
		.func = lb_htpm_select_tx_port,
	},
};
#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)

static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func)
{
	int i;

	for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
		const struct lb_select_tx_port *item;

		item = &lb_select_tx_port_list[i];
		if (item->func == func)
			return item->name;
	}
	return NULL;
}

static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
{
	int i;

	for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
		const struct lb_select_tx_port *item;

		item = &lb_select_tx_port_list[i];
		if (!strcmp(item->name, name))
			return item->func;
	}
	return NULL;
}

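/*
 * Run the user-supplied BPF program (option "bpf_hash_func") on the skb and
 * fold its 32-bit result into an 8-bit hash by XORing the four bytes.
 * Without a program installed, every packet hashes to 0.
 */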
static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
				    struct sk_buff *skb)
{
	struct bpf_prog *fp;
	uint32_t lhash;
	unsigned char *c;

	fp = rcu_dereference_bh(lb_priv->fp);
	if (unlikely(!fp))
		return 0;
	lhash = bpf_prog_run(fp, skb);
	c = (char *) &lhash;
	return c[0] ^ c[1] ^ c[2] ^ c[3];
}

static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv,
			       struct lb_port_priv *lb_port_priv,
			       unsigned char hash)
{
	struct lb_pcpu_stats *pcpu_stats;
	struct lb_stats *port_stats;
	struct lb_stats *hash_stats;

	pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats);
	port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats);
	hash_stats = &pcpu_stats->hash_stats[hash];
	u64_stats_update_begin(&pcpu_stats->syncp);
	port_stats->tx_bytes += tx_bytes;
	hash_stats->tx_bytes += tx_bytes;
	u64_stats_update_end(&pcpu_stats->syncp);
}

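/*
 * TX path: hash the skb, let the currently selected tx-port selector pick
 * an egress port, queue the skb on that port and account the bytes per
 * hash bucket and per port. The skb is dropped if no port is available.
 */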
static bool lb_transmit(struct team *team, struct sk_buff *skb)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *select_tx_port_func;
	struct team_port *port;
	unsigned char hash;
	unsigned int tx_bytes = skb->len;

	hash = lb_get_skb_hash(lb_priv, skb);
	select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
	port = select_tx_port_func(team, lb_priv, skb, hash);
	if (unlikely(!port))
		goto drop;
	if (team_dev_queue_xmit(team, port, skb))
		return false;
	lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash);
	return true;

drop:
	dev_kfree_skb_any(skb);
	return false;
}

static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);

	if (!lb_priv->ex->orig_fprog) {
		ctx->data.bin_val.len = 0;
		ctx->data.bin_val.ptr = NULL;
		return 0;
	}
	ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
				sizeof(struct sock_filter);
	ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
	return 0;
}

static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
			  const void *data)
{
	struct sock_fprog_kern *fprog;
	struct sock_filter *filter = (struct sock_filter *) data;

	if (data_len % sizeof(struct sock_filter))
		return -EINVAL;
	fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
	if (!fprog)
		return -ENOMEM;
	fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
	if (!fprog->filter) {
		kfree(fprog);
		return -ENOMEM;
	}
	fprog->len = data_len / sizeof(struct sock_filter);
	*pfprog = fprog;
	return 0;
}

static void __fprog_destroy(struct sock_fprog_kern *fprog)
{
	kfree(fprog->filter);
	kfree(fprog);
}

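/*
 * Install a new classic-BPF hash program: the raw sock_filter array from
 * userspace is duplicated, compiled with bpf_prog_create() and swapped in
 * via RCU; the previous program, if any, is destroyed after a grace period.
 */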
static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct bpf_prog *fp = NULL;
	struct bpf_prog *orig_fp = NULL;
	struct sock_fprog_kern *fprog = NULL;
	int err;

	if (ctx->data.bin_val.len) {
		err = __fprog_create(&fprog, ctx->data.bin_val.len,
				     ctx->data.bin_val.ptr);
		if (err)
			return err;
		err = bpf_prog_create(&fp, fprog);
		if (err) {
			__fprog_destroy(fprog);
			return err;
		}
	}

	if (lb_priv->ex->orig_fprog) {
		/* Clear old filter data */
		__fprog_destroy(lb_priv->ex->orig_fprog);
		orig_fp = rcu_dereference_protected(lb_priv->fp,
						lockdep_is_held(&team->lock));
	}

	rcu_assign_pointer(lb_priv->fp, fp);
	lb_priv->ex->orig_fprog = fprog;

	if (orig_fp) {
		synchronize_rcu();
		bpf_prog_destroy(orig_fp);
	}
	return 0;
}

static void lb_bpf_func_free(struct team *team)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct bpf_prog *fp;

	if (!lb_priv->ex->orig_fprog)
		return;

	__fprog_destroy(lb_priv->ex->orig_fprog);
	fp = rcu_dereference_protected(lb_priv->fp,
				       lockdep_is_held(&team->lock));
	bpf_prog_destroy(fp);
}

static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *func;
	char *name;

	func = rcu_dereference_protected(lb_priv->select_tx_port_func,
					 lockdep_is_held(&team->lock));
	name = lb_select_tx_port_get_name(func);
	BUG_ON(!name);
	ctx->data.str_val = name;
	return 0;
}

static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *func;

	func = lb_select_tx_port_get_func(ctx->data.str_val);
	if (!func)
		return -EINVAL;
	rcu_assign_pointer(lb_priv->select_tx_port_func, func);
	return 0;
}

static int lb_tx_hash_to_port_mapping_init(struct team *team,
					   struct team_option_inst_info *info)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = info->array_index;

	LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
	return 0;
}

static int lb_tx_hash_to_port_mapping_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct team_port *port;
	unsigned char hash = ctx->info->array_index;

	port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
	ctx->data.u32_val = port ? port->dev->ifindex : 0;
	return 0;
}

static int lb_tx_hash_to_port_mapping_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct team_port *port;
	unsigned char hash = ctx->info->array_index;

	list_for_each_entry(port, &team->port_list, list) {
		if (ctx->data.u32_val == port->dev->ifindex &&
		    team_port_enabled(port)) {
			rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash),
					   port);
			return 0;
		}
	}
	return -ENODEV;
}

static int lb_hash_stats_init(struct team *team,
			      struct team_option_inst_info *info)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = info->array_index;

	lb_priv->ex->stats.info[hash].opt_inst_info = info;
	return 0;
}

static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = ctx->info->array_index;

	ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
	ctx->data.bin_val.len = sizeof(struct lb_stats);
	return 0;
}

static int lb_port_stats_init(struct team *team,
			      struct team_option_inst_info *info)
{
	struct team_port *port = info->port;
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	lb_port_priv->stats_info.opt_inst_info = info;
	return 0;
}

static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
	ctx->data.bin_val.len = sizeof(struct lb_stats);
	return 0;
}

static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
{
	memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats));
	memset(&s_info->stats, 0, sizeof(struct lb_stats));
}

static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info,
					  struct team *team)
{
	if (memcmp(&s_info->last_stats, &s_info->stats,
	    sizeof(struct lb_stats))) {
		team_option_inst_set_change(s_info->opt_inst_info);
		return true;
	}
	return false;
}

static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
				   struct lb_stats *cpu_stats,
				   struct u64_stats_sync *syncp)
{
	unsigned int start;
	struct lb_stats tmp;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		tmp.tx_bytes = cpu_stats->tx_bytes;
	} while (u64_stats_fetch_retry_irq(syncp, start));
	acc_stats->tx_bytes += tmp.tx_bytes;
}

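/*
 * Delayed work that folds the per-CPU counters into the per-hash and
 * per-port stats exposed through options. It re-arms itself every
 * refresh_interval tenths of a second and reschedules immediately when
 * it cannot take team->lock.
 */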
static void lb_stats_refresh(struct work_struct *work)
{
	struct team *team;
	struct lb_priv *lb_priv;
	struct lb_priv_ex *lb_priv_ex;
	struct lb_pcpu_stats *pcpu_stats;
	struct lb_stats *stats;
	struct lb_stats_info *s_info;
	struct team_port *port;
	bool changed = false;
	int i;
	int j;

	lb_priv_ex = container_of(work, struct lb_priv_ex,
				  stats.refresh_dw.work);

	team = lb_priv_ex->team;
	lb_priv = get_lb_priv(team);

	if (!mutex_trylock(&team->lock)) {
		schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
		return;
	}

	for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) {
		s_info = &lb_priv->ex->stats.info[j];
		__lb_stats_info_refresh_prepare(s_info);
		for_each_possible_cpu(i) {
			pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
			stats = &pcpu_stats->hash_stats[j];
			__lb_one_cpu_stats_add(&s_info->stats, stats,
					       &pcpu_stats->syncp);
		}
		changed |= __lb_stats_info_refresh_check(s_info, team);
	}

	list_for_each_entry(port, &team->port_list, list) {
		struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

		s_info = &lb_port_priv->stats_info;
		__lb_stats_info_refresh_prepare(s_info);
		for_each_possible_cpu(i) {
			pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
			stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i);
			__lb_one_cpu_stats_add(&s_info->stats, stats,
					       &pcpu_stats->syncp);
		}
		changed |= __lb_stats_info_refresh_check(s_info, team);
	}

	if (changed)
		team_options_change_check(team);

	schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
			      (lb_priv_ex->stats.refresh_interval * HZ) / 10);

	mutex_unlock(&team->lock);
}

static int lb_stats_refresh_interval_get(struct team *team,
					 struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);

	ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
	return 0;
}

static int lb_stats_refresh_interval_set(struct team *team,
					 struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned int interval;

	interval = ctx->data.u32_val;
	if (lb_priv->ex->stats.refresh_interval == interval)
		return 0;
	lb_priv->ex->stats.refresh_interval = interval;
	if (interval)
		schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0);
	else
		cancel_delayed_work(&lb_priv->ex->stats.refresh_dw);
	return 0;
}

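/*
 * Options exposed to userspace: the BPF hash program, the tx selector
 * name ("hash" or "hash_to_port_mapping"), the per-hash port mapping,
 * per-hash and per-port byte counters, and the stats refresh interval.
 */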
static const struct team_option lb_options[] = {
	{
		.name = "bpf_hash_func",
		.type = TEAM_OPTION_TYPE_BINARY,
		.getter = lb_bpf_func_get,
		.setter = lb_bpf_func_set,
	},
	{
		.name = "lb_tx_method",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = lb_tx_method_get,
		.setter = lb_tx_method_set,
	},
	{
		.name = "lb_tx_hash_to_port_mapping",
		.array_size = LB_TX_HASHTABLE_SIZE,
		.type = TEAM_OPTION_TYPE_U32,
		.init = lb_tx_hash_to_port_mapping_init,
		.getter = lb_tx_hash_to_port_mapping_get,
		.setter = lb_tx_hash_to_port_mapping_set,
	},
	{
		.name = "lb_hash_stats",
		.array_size = LB_TX_HASHTABLE_SIZE,
		.type = TEAM_OPTION_TYPE_BINARY,
		.init = lb_hash_stats_init,
		.getter = lb_hash_stats_get,
	},
	{
		.name = "lb_port_stats",
		.per_port = true,
		.type = TEAM_OPTION_TYPE_BINARY,
		.init = lb_port_stats_init,
		.getter = lb_port_stats_get,
	},
	{
		.name = "lb_stats_refresh_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = lb_stats_refresh_interval_get,
		.setter = lb_stats_refresh_interval_set,
	},
};

static int lb_init(struct team *team)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *func;
	int i, err;

	/* set default tx port selector */
	func = lb_select_tx_port_get_func("hash");
	BUG_ON(!func);
	rcu_assign_pointer(lb_priv->select_tx_port_func, func);

	lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL);
	if (!lb_priv->ex)
		return -ENOMEM;
	lb_priv->ex->team = team;

	lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
	if (!lb_priv->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_pcpu_stats;
	}

	for_each_possible_cpu(i) {
		struct lb_pcpu_stats *team_lb_stats;
		team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
		u64_stats_init(&team_lb_stats->syncp);
	}

	INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);

	err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
	if (err)
		goto err_options_register;
	return 0;

err_options_register:
	free_percpu(lb_priv->pcpu_stats);
err_alloc_pcpu_stats:
	kfree(lb_priv->ex);
	return err;
}

static void lb_exit(struct team *team)
{
	struct lb_priv *lb_priv = get_lb_priv(team);

	team_options_unregister(team, lb_options,
				ARRAY_SIZE(lb_options));
	lb_bpf_func_free(team);
	cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
	free_percpu(lb_priv->pcpu_stats);
	kfree(lb_priv->ex);
}

static int lb_port_enter(struct team *team, struct team_port *port)
{
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
	if (!lb_port_priv->pcpu_stats)
		return -ENOMEM;
	return 0;
}

static void lb_port_leave(struct team *team, struct team_port *port)
{
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	free_percpu(lb_port_priv->pcpu_stats);
}

static void lb_port_disabled(struct team *team, struct team_port *port)
{
	lb_tx_hash_to_port_mapping_null_port(team, port);
}

static const struct team_mode_ops lb_mode_ops = {
	.init			= lb_init,
	.exit			= lb_exit,
	.port_enter		= lb_port_enter,
	.port_leave		= lb_port_leave,
	.port_disabled		= lb_port_disabled,
	.receive		= lb_receive,
	.transmit		= lb_transmit,
};

static const struct team_mode lb_mode = {
	.kind		= "loadbalance",
	.owner		= THIS_MODULE,
	.priv_size	= sizeof(struct lb_priv),
	.port_priv_size	= sizeof(struct lb_port_priv),
	.ops		= &lb_mode_ops,
	.lag_tx_type	= NETDEV_LAG_TX_TYPE_HASH,
};

static int __init lb_init_module(void)
{
	return team_mode_register(&lb_mode);
}

static void __exit lb_cleanup_module(void)
{
	team_mode_unregister(&lb_mode);
}

module_init(lb_init_module);
module_exit(lb_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Load-balancing mode for team");
MODULE_ALIAS_TEAM_MODE("loadbalance");
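
For context, a rough sketch (not part of the file above) of the kind of payload the "bpf_hash_func" option accepts: lb_bpf_func_set() expects a flat array of struct sock_filter whose byte length is a multiple of sizeof(struct sock_filter), wraps it in a sock_fprog_kern and compiles it with bpf_prog_create(). Userspace tools such as teamd's loadbalance runner generate these programs from their tx_hash configuration; the hand-written program below is purely illustrative and simply returns the EtherType as the 32-bit value that lb_get_skb_hash() folds down to 8 bits.

#include <linux/filter.h>	/* struct sock_filter, BPF_STMT() */

/* Illustrative classic-BPF program; offsets assume the MAC header
 * starts at offset 0 of the outgoing frame. */
static const struct sock_filter example_hash_prog[] = {
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),	/* A = EtherType (bytes 12-13) */
	BPF_STMT(BPF_RET | BPF_A, 0),		/* return A as the hash input */
};
/* option payload: ptr = example_hash_prog, len = sizeof(example_hash_prog) */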