cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

node.c (80872B)


      1/*
      2 * net/tipc/node.c: TIPC node management routines
      3 *
      4 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
      5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
      6 * All rights reserved.
      7 *
      8 * Redistribution and use in source and binary forms, with or without
      9 * modification, are permitted provided that the following conditions are met:
     10 *
     11 * 1. Redistributions of source code must retain the above copyright
     12 *    notice, this list of conditions and the following disclaimer.
     13 * 2. Redistributions in binary form must reproduce the above copyright
     14 *    notice, this list of conditions and the following disclaimer in the
     15 *    documentation and/or other materials provided with the distribution.
     16 * 3. Neither the names of the copyright holders nor the names of its
     17 *    contributors may be used to endorse or promote products derived from
     18 *    this software without specific prior written permission.
     19 *
     20 * Alternatively, this software may be distributed under the terms of the
     21 * GNU General Public License ("GPL") version 2 as published by the Free
     22 * Software Foundation.
     23 *
     24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     34 * POSSIBILITY OF SUCH DAMAGE.
     35 */
     36
     37#include "core.h"
     38#include "link.h"
     39#include "node.h"
     40#include "name_distr.h"
     41#include "socket.h"
     42#include "bcast.h"
     43#include "monitor.h"
     44#include "discover.h"
     45#include "netlink.h"
     46#include "trace.h"
     47#include "crypto.h"
     48
     49#define INVALID_NODE_SIG	0x10000
     50#define NODE_CLEANUP_AFTER	300000
     51
/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 * (Handled and cleared in tipc_node_write_unlock().)
 */
enum {
	TIPC_NOTIFY_NODE_DOWN		= (1 << 3),
	TIPC_NOTIFY_NODE_UP		= (1 << 4),
	TIPC_NOTIFY_LINK_UP		= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN		= (1 << 7)
};
     63
/**
 * struct tipc_link_entry - per-bearer link state kept in a node
 * @link: pointer to the link instance on this bearer, or NULL
 * @lock: spinlock protecting this link entry (per link)
 * @mtu: cached maximum message size for this link
 * @inputq: queue of inbound messages for upward delivery
 * @maddr: media address of the peer endpoint on this bearer
 */
struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};
     71
/* Broadcast link state kept per peer node.
 * @link, @inputq1 and @namedq are handed to the broadcast receive link
 * at creation time (see tipc_node_create()). @arrvq and @inputq2 are
 * additional staging queues for broadcast delivery — presumably
 * arrival queue and second-stage input queue; confirm against bcast.c.
 */
struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
	u16 named_rcv_nxt;
	bool named_open;
};
     81
/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @state: connectivity state vs peer node
 * @preliminary: a preliminary node or not
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @link_cnt: number of links to node
 * @working_links: number of working links to node (both active and standby)
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @publ_list: list of publications
 * @conn_sks: list of connections (FIXME)
 * @keepalive_intv: keepalive interval in milliseconds
 * @timer: node's keepalive timer
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: peer's net namespace
 * @peer_hash_mix: hash for this peer (FIXME)
 * @crypto_rx: RX crypto handler
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};
    151
/* Node FSM states and events:
 * States encode the combined connectivity of the local and peer
 * endpoints (the hex values look mnemonic, e.g. 0xdd ~ self down /
 * peer down, 0xaa ~ self up / peer up, but are otherwise arbitrary).
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};
    164
/* Node FSM events: inputs fed to tipc_node_fsm_evt() to drive the
 * state machine above.
 */
enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};
    175
    176static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
    177				  struct sk_buff_head *xmitq,
    178				  struct tipc_media_addr **maddr);
    179static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
    180				bool delete);
    181static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
    182static void tipc_node_delete(struct tipc_node *node);
    183static void tipc_node_timeout(struct timer_list *t);
    184static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
    185static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
    186static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
    187static bool node_is_up(struct tipc_node *n);
    188static void tipc_node_delete_from_list(struct tipc_node *node);
    189
/**
 * struct tipc_sock_conn - record of a socket connection to a peer node
 * @port: local port of the connection
 * @peer_port: peer's port of the connection
 * @peer_node: network address of the peer node
 * @list: member of the owning node's conn_sks list
 */
struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};
    196
    197static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
    198{
    199	int bearer_id = n->active_links[sel & 1];
    200
    201	if (unlikely(bearer_id == INVALID_BEARER_ID))
    202		return NULL;
    203
    204	return n->links[bearer_id].link;
    205}
    206
    207int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
    208{
    209	struct tipc_node *n;
    210	int bearer_id;
    211	unsigned int mtu = MAX_MSG_SIZE;
    212
    213	n = tipc_node_find(net, addr);
    214	if (unlikely(!n))
    215		return mtu;
    216
    217	/* Allow MAX_MSG_SIZE when building connection oriented message
    218	 * if they are in the same core network
    219	 */
    220	if (n->peer_net && connected) {
    221		tipc_node_put(n);
    222		return mtu;
    223	}
    224
    225	bearer_id = n->active_links[sel & 1];
    226	if (likely(bearer_id != INVALID_BEARER_ID))
    227		mtu = n->links[bearer_id].mtu;
    228	tipc_node_put(n);
    229	return mtu;
    230}
    231
    232bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
    233{
    234	u8 *own_id = tipc_own_id(net);
    235	struct tipc_node *n;
    236
    237	if (!own_id)
    238		return true;
    239
    240	if (addr == tipc_own_addr(net)) {
    241		memcpy(id, own_id, TIPC_NODEID_LEN);
    242		return true;
    243	}
    244	n = tipc_node_find(net, addr);
    245	if (!n)
    246		return false;
    247
    248	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
    249	tipc_node_put(n);
    250	return true;
    251}
    252
    253u16 tipc_node_get_capabilities(struct net *net, u32 addr)
    254{
    255	struct tipc_node *n;
    256	u16 caps;
    257
    258	n = tipc_node_find(net, addr);
    259	if (unlikely(!n))
    260		return TIPC_NODE_CAPABILITIES;
    261	caps = n->capabilities;
    262	tipc_node_put(n);
    263	return caps;
    264}
    265
    266u32 tipc_node_get_addr(struct tipc_node *node)
    267{
    268	return (node) ? node->addr : 0;
    269}
    270
/* Return the node's formatted 128-bit identity string.
 * NOTE(review): no NULL check - callers must pass a valid node.
 */
char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}
    275
#ifdef CONFIG_TIPC_CRYPTO
/**
 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
 * @__n: target tipc_node
 * Note: node ref counter must be held first!
 */
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
{
	return (__n) ? __n->crypto_rx : NULL;
}

/* tipc_node_crypto_rx_by_list - crypto RX handle from a node_list position
 * @pos: the list_head embedded in the owning tipc_node
 */
struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}

/* tipc_node_crypto_rx_by_addr - crypto RX handle looked up by address
 * Note: tipc_node_find() takes a node reference on success; the node
 * reference is kept (see note on tipc_node_crypto_rx()), so the caller
 * is responsible for releasing it - NOTE(review): verify callers do.
 */
struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	n = tipc_node_find(net, addr);
	return (n) ? n->crypto_rx : NULL;
}
#endif
    300
/* Final free of a node, invoked as an RCU callback once all readers
 * are done. Stops the crypto RX handler (if configured) before freeing.
 */
static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}
    310
/* Last reference dropped: free the broadcast link immediately and
 * defer freeing of the node itself until after an RCU grace period.
 */
static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}
    318
/* Drop a node reference; the node is freed via RCU when it hits zero */
void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}
    323
/* Take an additional reference to a node */
void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}
    328
/*
 * tipc_node_find - locate specified node object, if it exists
 *
 * Walks the address hash chain under RCU, skipping preliminary nodes.
 * On success a reference is taken; kref_get_unless_zero() guards
 * against racing with a concurrent final put, in which case NULL is
 * returned. The caller must release the reference with tipc_node_put().
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}
    349
/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 *
 * On success a node reference is taken (under the node's read lock, so
 * the id comparison and ref acquisition are atomic vs writers); the
 * caller must release it with tipc_node_put().
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}
    373
/* Node lock wrappers. The __acquires/__releases annotations document
 * the lock transfer for sparse static analysis.
 */
static void tipc_node_read_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
	__acquires(n->lock)
{
	write_lock_bh(&n->lock);
}

/* "fast" write unlock: drops the lock without processing any pending
 * action_flags (contrast with tipc_node_write_unlock() below).
 */
static void tipc_node_write_unlock_fast(struct tipc_node *n)
	__releases(n->lock)
{
	write_unlock_bh(&n->lock);
}
    397
/* tipc_node_write_unlock - release the write lock and run deferred work
 *
 * Snapshots the node's action_flags and the data the notifications
 * need while still holding the lock, clears the handled flags, then
 * performs the notifications after the lock is dropped (the notify
 * helpers may sleep on or take other locks).
 */
static void tipc_node_write_unlock(struct tipc_node *n)
	__releases(n->lock)
{
	struct tipc_socket_addr sk;
	struct net *net = n->net;
	u32 flags = n->action_flags;
	struct list_head *publ_list;
	struct tipc_uaddr ua;
	u32 bearer_id, node;

	/* Fast path: nothing deferred */
	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
		   TIPC_LINK_STATE, n->addr, n->addr);
	sk.ref = n->link_id;
	sk.node = tipc_own_addr(net);
	node = n->addr;
	bearer_id = n->link_id & 0xffff;
	publ_list = &n->publ_list;

	/* Clear only the flags handled below; others stay pending */
	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	/* NOTE(review): n->capabilities is read below after the lock is
	 * dropped - racy in principle; confirm this is benign.
	 */
	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, node, n->capabilities);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, node, n->capabilities);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, node, bearer_id);
		tipc_nametbl_publish(net, &ua, &sk, sk.ref);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, node, bearer_id);
		tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
	}
}
    441
/* tipc_node_assign_peer_net - bind a peer node to a kernel-local netns
 * @n: node to update
 * @hash_mixes: namespace hash value advertised by the peer
 *
 * Scans all net namespaces for a TIPC instance whose net id, node id
 * and namespace hash all match the peer, and records it as the peer's
 * local namespace. No-op if a peer namespace is already assigned.
 * NOTE(review): relies on the caller's context for for_each_net_rcu()
 * protection (called under tn->node_list_lock) - confirm.
 */
static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Integrity checking whether node exists in namespace or not */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}
    469
    470struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
    471				   u16 capabilities, u32 hash_mixes,
    472				   bool preliminary)
    473{
    474	struct tipc_net *tn = net_generic(net, tipc_net_id);
    475	struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
    476	struct tipc_node *n, *temp_node;
    477	unsigned long intv;
    478	int bearer_id;
    479	int i;
    480
    481	spin_lock_bh(&tn->node_list_lock);
    482	n = tipc_node_find(net, addr) ?:
    483		tipc_node_find_by_id(net, peer_id);
    484	if (n) {
    485		if (!n->preliminary)
    486			goto update;
    487		if (preliminary)
    488			goto exit;
    489		/* A preliminary node becomes "real" now, refresh its data */
    490		tipc_node_write_lock(n);
    491		if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
    492					 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
    493					 n->capabilities, &n->bc_entry.inputq1,
    494					 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
    495			pr_warn("Broadcast rcv link refresh failed, no memory\n");
    496			tipc_node_write_unlock_fast(n);
    497			tipc_node_put(n);
    498			n = NULL;
    499			goto exit;
    500		}
    501		n->preliminary = false;
    502		n->addr = addr;
    503		hlist_del_rcu(&n->hash);
    504		hlist_add_head_rcu(&n->hash,
    505				   &tn->node_htable[tipc_hashfn(addr)]);
    506		list_del_rcu(&n->list);
    507		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
    508			if (n->addr < temp_node->addr)
    509				break;
    510		}
    511		list_add_tail_rcu(&n->list, &temp_node->list);
    512		tipc_node_write_unlock_fast(n);
    513
    514update:
    515		if (n->peer_hash_mix ^ hash_mixes)
    516			tipc_node_assign_peer_net(n, hash_mixes);
    517		if (n->capabilities == capabilities)
    518			goto exit;
    519		/* Same node may come back with new capabilities */
    520		tipc_node_write_lock(n);
    521		n->capabilities = capabilities;
    522		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
    523			l = n->links[bearer_id].link;
    524			if (l)
    525				tipc_link_update_caps(l, capabilities);
    526		}
    527		tipc_node_write_unlock_fast(n);
    528
    529		/* Calculate cluster capabilities */
    530		tn->capabilities = TIPC_NODE_CAPABILITIES;
    531		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
    532			tn->capabilities &= temp_node->capabilities;
    533		}
    534
    535		tipc_bcast_toggle_rcast(net,
    536					(tn->capabilities & TIPC_BCAST_RCAST));
    537
    538		goto exit;
    539	}
    540	n = kzalloc(sizeof(*n), GFP_ATOMIC);
    541	if (!n) {
    542		pr_warn("Node creation failed, no memory\n");
    543		goto exit;
    544	}
    545	tipc_nodeid2string(n->peer_id_string, peer_id);
    546#ifdef CONFIG_TIPC_CRYPTO
    547	if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
    548		pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
    549		kfree(n);
    550		n = NULL;
    551		goto exit;
    552	}
    553#endif
    554	n->addr = addr;
    555	n->preliminary = preliminary;
    556	memcpy(&n->peer_id, peer_id, 16);
    557	n->net = net;
    558	n->peer_net = NULL;
    559	n->peer_hash_mix = 0;
    560	/* Assign kernel local namespace if exists */
    561	tipc_node_assign_peer_net(n, hash_mixes);
    562	n->capabilities = capabilities;
    563	kref_init(&n->kref);
    564	rwlock_init(&n->lock);
    565	INIT_HLIST_NODE(&n->hash);
    566	INIT_LIST_HEAD(&n->list);
    567	INIT_LIST_HEAD(&n->publ_list);
    568	INIT_LIST_HEAD(&n->conn_sks);
    569	skb_queue_head_init(&n->bc_entry.namedq);
    570	skb_queue_head_init(&n->bc_entry.inputq1);
    571	__skb_queue_head_init(&n->bc_entry.arrvq);
    572	skb_queue_head_init(&n->bc_entry.inputq2);
    573	for (i = 0; i < MAX_BEARERS; i++)
    574		spin_lock_init(&n->links[i].lock);
    575	n->state = SELF_DOWN_PEER_LEAVING;
    576	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
    577	n->signature = INVALID_NODE_SIG;
    578	n->active_links[0] = INVALID_BEARER_ID;
    579	n->active_links[1] = INVALID_BEARER_ID;
    580	if (!preliminary &&
    581	    !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
    582				 tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
    583				 n->capabilities, &n->bc_entry.inputq1,
    584				 &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
    585		pr_warn("Broadcast rcv link creation failed, no memory\n");
    586		kfree(n);
    587		n = NULL;
    588		goto exit;
    589	}
    590	tipc_node_get(n);
    591	timer_setup(&n->timer, tipc_node_timeout, 0);
    592	/* Start a slow timer anyway, crypto needs it */
    593	n->keepalive_intv = 10000;
    594	intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
    595	if (!mod_timer(&n->timer, intv))
    596		tipc_node_get(n);
    597	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
    598	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
    599		if (n->addr < temp_node->addr)
    600			break;
    601	}
    602	list_add_tail_rcu(&n->list, &temp_node->list);
    603	/* Calculate cluster capabilities */
    604	tn->capabilities = TIPC_NODE_CAPABILITIES;
    605	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
    606		tn->capabilities &= temp_node->capabilities;
    607	}
    608	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
    609	trace_tipc_node_create(n, true, " ");
    610exit:
    611	spin_unlock_bh(&tn->node_list_lock);
    612	return n;
    613}
    614
    615static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
    616{
    617	unsigned long tol = tipc_link_tolerance(l);
    618	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
    619
    620	/* Link with lowest tolerance determines timer interval */
    621	if (intv < n->keepalive_intv)
    622		n->keepalive_intv = intv;
    623
    624	/* Ensure link's abort limit corresponds to current tolerance */
    625	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
    626}
    627
/* Unlink a node from the hash chain and sorted list, flushing crypto
 * keys first, and drop the list's reference. Callers hold
 * tn->node_list_lock (see tipc_node_stop()/tipc_node_cleanup()).
 */
static void tipc_node_delete_from_list(struct tipc_node *node)
{
#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_key_flush(node->crypto_rx);
#endif
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}
    637
/* Delete a node: unlink it from all lists, stop its keepalive timer,
 * and drop the timer's reference in addition to the list reference
 * dropped by tipc_node_delete_from_list().
 */
static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}
    646
/* tipc_node_stop - delete all nodes in the namespace (net shutdown) */
void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}
    657
    658void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
    659{
    660	struct tipc_node *n;
    661
    662	if (in_own_node(net, addr))
    663		return;
    664
    665	n = tipc_node_find(net, addr);
    666	if (!n) {
    667		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
    668		return;
    669	}
    670	tipc_node_write_lock(n);
    671	list_add_tail(subscr, &n->publ_list);
    672	tipc_node_write_unlock_fast(n);
    673	tipc_node_put(n);
    674}
    675
    676void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
    677{
    678	struct tipc_node *n;
    679
    680	if (in_own_node(net, addr))
    681		return;
    682
    683	n = tipc_node_find(net, addr);
    684	if (!n) {
    685		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
    686		return;
    687	}
    688	tipc_node_write_lock(n);
    689	list_del_init(subscr);
    690	tipc_node_write_unlock_fast(n);
    691	tipc_node_put(n);
    692}
    693
    694int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
    695{
    696	struct tipc_node *node;
    697	struct tipc_sock_conn *conn;
    698	int err = 0;
    699
    700	if (in_own_node(net, dnode))
    701		return 0;
    702
    703	node = tipc_node_find(net, dnode);
    704	if (!node) {
    705		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
    706		return -EHOSTUNREACH;
    707	}
    708	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
    709	if (!conn) {
    710		err = -EHOSTUNREACH;
    711		goto exit;
    712	}
    713	conn->peer_node = dnode;
    714	conn->port = port;
    715	conn->peer_port = peer_port;
    716
    717	tipc_node_write_lock(node);
    718	list_add_tail(&conn->list, &node->conn_sks);
    719	tipc_node_write_unlock(node);
    720exit:
    721	tipc_node_put(node);
    722	return err;
    723}
    724
    725void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
    726{
    727	struct tipc_node *node;
    728	struct tipc_sock_conn *conn, *safe;
    729
    730	if (in_own_node(net, dnode))
    731		return;
    732
    733	node = tipc_node_find(net, dnode);
    734	if (!node)
    735		return;
    736
    737	tipc_node_write_lock(node);
    738	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
    739		if (port != conn->port)
    740			continue;
    741		list_del(&conn->list);
    742		kfree(conn);
    743	}
    744	tipc_node_write_unlock(node);
    745	tipc_node_put(node);
    746}
    747
    748static void  tipc_node_clear_links(struct tipc_node *node)
    749{
    750	int i;
    751
    752	for (i = 0; i < MAX_BEARERS; i++) {
    753		struct tipc_link_entry *le = &node->links[i];
    754
    755		if (le->link) {
    756			kfree(le->link);
    757			le->link = NULL;
    758			node->link_cnt--;
    759		}
    760	}
    761}
    762
/* tipc_node_cleanup - delete nodes that does not
 * have active links for NODE_CLEANUP_AFTER time
 *
 * Returns true if the node was removed from the lists; the caller
 * (tipc_node_timeout()) then drops the timer reference.
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	/* Only reap nodes that are down and past their expiry time */
	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}
    800
/* tipc_node_timeout - handle expiration of node timer
 *
 * Runs link timeouts for every configured bearer, transmits whatever
 * the links queued, and re-arms the timer at the recalculated
 * keepalive interval. May reap the node entirely via
 * tipc_node_cleanup() when it has been down for too long.
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/*Removing the reference of Timer*/
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Initial node interval to value larger (10 seconds), then it will be
	 * recalculated with link lowest tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			/* Per-link lock serializes the link timeout */
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		/* NOTE(review): le->maddr is passed after the node lock is
		 * dropped - confirm the media address is stable here.
		 */
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}
    849
/**
 * __tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	/* Nothing to do if the link is missing or already up */
	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users:
	 * NOTE(review): the cached value is tipc_link_mss(nl); confirm the
	 * tunnel-header headroom is already accounted for inside mss.
	 */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		/* Equal priority: share the load, one slot each */
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}
    918
/**
 * tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer
 * @xmitq: queue for messages to be xmited on
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 * Locked wrapper around __tipc_node_link_up(): takes the node write
 * lock and drains @xmitq onto the bearer before releasing it.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	/* tipc_node_write_unlock() also runs any deferred notifications
	 * raised by __tipc_node_link_up() (LINK_UP/NODE_UP flags).
	 */
	tipc_node_write_unlock(n);
}
    938
    939/**
    940 * tipc_node_link_failover() - start failover in case "half-failover"
    941 *
    942 * This function is only called in a very special situation where link
    943 * failover can be already started on peer node but not on this node.
    944 * This can happen when e.g.::
    945 *
    946 *	1. Both links <1A-2A>, <1B-2B> down
    947 *	2. Link endpoint 2A up, but 1A still down (e.g. due to network
    948 *	disturbance, wrong session, etc.)
    949 *	3. Link <1B-2B> up
    950 *	4. Link endpoint 2A down (e.g. due to link tolerance timeout)
    951 *	5. Node 2 starts failover onto link <1B-2B>
    952 *
    953 *	==> Node 1 does never start link/node failover!
    954 *
    955 * @n: tipc node structure
    956 * @l: link peer endpoint failingover (- can be NULL)
    957 * @tnl: tunnel link
    958 * @xmitq: queue for messages to be xmited on tnl link later
    959 */
    960static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
    961				    struct tipc_link *tnl,
    962				    struct sk_buff_head *xmitq)
    963{
    964	/* Avoid to be "self-failover" that can never end */
    965	if (!tipc_link_is_up(tnl))
    966		return;
    967
    968	/* Don't rush, failure link may be in the process of resetting */
    969	if (l && !tipc_link_is_reset(l))
    970		return;
    971
    972	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
    973	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
    974
    975	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
    976	tipc_link_failover_prepare(l, tnl, xmitq);
    977
    978	if (l)
    979		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
    980	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
    981}
    982
    983/**
    984 * __tipc_node_link_down - handle loss of link
    985 * @n: target tipc_node
    986 * @bearer_id: id of the bearer
    987 * @xmitq: queue for messages to be xmited on
    988 * @maddr: output media address of the bearer
    989 */
    990static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
    991				  struct sk_buff_head *xmitq,
    992				  struct tipc_media_addr **maddr)
    993{
    994	struct tipc_link_entry *le = &n->links[*bearer_id];
    995	int *slot0 = &n->active_links[0];
    996	int *slot1 = &n->active_links[1];
    997	int i, highest = 0, prio;
    998	struct tipc_link *l, *_l, *tnl;
    999
   1000	l = n->links[*bearer_id].link;
   1001	if (!l || tipc_link_is_reset(l))
   1002		return;
   1003
   1004	n->working_links--;
   1005	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
   1006	n->link_id = tipc_link_id(l);
   1007
   1008	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
   1009
   1010	pr_debug("Lost link <%s> on network plane %c\n",
   1011		 tipc_link_name(l), tipc_link_plane(l));
   1012
   1013	/* Select new active link if any available */
   1014	*slot0 = INVALID_BEARER_ID;
   1015	*slot1 = INVALID_BEARER_ID;
   1016	for (i = 0; i < MAX_BEARERS; i++) {
   1017		_l = n->links[i].link;
   1018		if (!_l || !tipc_link_is_up(_l))
   1019			continue;
   1020		if (_l == l)
   1021			continue;
   1022		prio = tipc_link_prio(_l);
   1023		if (prio < highest)
   1024			continue;
   1025		if (prio > highest) {
   1026			highest = prio;
   1027			*slot0 = i;
   1028			*slot1 = i;
   1029			continue;
   1030		}
   1031		*slot1 = i;
   1032	}
   1033
   1034	if (!node_is_up(n)) {
   1035		if (tipc_link_peer_is_down(l))
   1036			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
   1037		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
   1038		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
   1039		tipc_link_fsm_evt(l, LINK_RESET_EVT);
   1040		tipc_link_reset(l);
   1041		tipc_link_build_reset_msg(l, xmitq);
   1042		*maddr = &n->links[*bearer_id].maddr;
   1043		node_lost_contact(n, &le->inputq);
   1044		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
   1045		return;
   1046	}
   1047	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
   1048
   1049	/* There is still a working link => initiate failover */
   1050	*bearer_id = n->active_links[0];
   1051	tnl = n->links[*bearer_id].link;
   1052	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
   1053	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
   1054	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
   1055	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
   1056	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
   1057	tipc_link_reset(l);
   1058	tipc_link_fsm_evt(l, LINK_RESET_EVT);
   1059	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
   1060	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
   1061	*maddr = &n->links[*bearer_id].maddr;
   1062}
   1063
/**
 * tipc_node_link_down - take a link down and optionally delete it
 * @n: target tipc_node
 * @bearer_id: id of the bearer whose link went down
 * @delete: if true, also free the link and drop its monitor state
 */
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	/* __tipc_node_link_down() may redirect bearer_id to the failover
	 * bearer; keep the original for the monitor removal below.
	 */
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	/* Monitor and bearer/socket delivery are done outside the lock */
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}
   1098
   1099static bool node_is_up(struct tipc_node *n)
   1100{
   1101	return n->active_links[0] != INVALID_BEARER_ID;
   1102}
   1103
   1104bool tipc_node_is_up(struct net *net, u32 addr)
   1105{
   1106	struct tipc_node *n;
   1107	bool retval = false;
   1108
   1109	if (in_own_node(net, addr))
   1110		return true;
   1111
   1112	n = tipc_node_find(net, addr);
   1113	if (!n)
   1114		return false;
   1115	retval = node_is_up(n);
   1116	tipc_node_put(n);
   1117	return retval;
   1118}
   1119
   1120static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
   1121{
   1122	struct tipc_node *n;
   1123
   1124	addr ^= tipc_net(net)->random;
   1125	while ((n = tipc_node_find(net, addr))) {
   1126		tipc_node_put(n);
   1127		addr++;
   1128	}
   1129	return addr;
   1130}
   1131
   1132/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
   1133 * Returns suggested address if any, otherwise 0
   1134 */
   1135u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
   1136{
   1137	struct tipc_net *tn = tipc_net(net);
   1138	struct tipc_node *n;
   1139	bool preliminary;
   1140	u32 sugg_addr;
   1141
   1142	/* Suggest new address if some other peer is using this one */
   1143	n = tipc_node_find(net, addr);
   1144	if (n) {
   1145		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
   1146			addr = 0;
   1147		tipc_node_put(n);
   1148		if (!addr)
   1149			return 0;
   1150		return tipc_node_suggest_addr(net, addr);
   1151	}
   1152
   1153	/* Suggest previously used address if peer is known */
   1154	n = tipc_node_find_by_id(net, id);
   1155	if (n) {
   1156		sugg_addr = n->addr;
   1157		preliminary = n->preliminary;
   1158		tipc_node_put(n);
   1159		if (!preliminary)
   1160			return sugg_addr;
   1161	}
   1162
   1163	/* Even this node may be in conflict */
   1164	if (tn->trial_addr == addr)
   1165		return tipc_node_suggest_addr(net, addr);
   1166
   1167	return 0;
   1168}
   1169
/**
 * tipc_node_check_dest - process a neighbor discovery message
 * @net: the applicable net namespace
 * @addr: node address advertised by the peer
 * @peer_id: node identity advertised by the peer
 * @b: bearer the discovery message arrived on
 * @capabilities: peer's advertised capability bitmap
 * @signature: peer's instance signature (compared against our record)
 * @hash_mixes: peer's hash mix value
 * @maddr: media address advertised by the peer
 * @respond: output - whether a discovery response should be sent
 * @dupl_addr: output - whether a duplicate node address was detected
 *
 * Validates the peer's signature/address combination against our current
 * state and, when accepted, creates the link towards the peer if it does
 * not already exist.
 */
void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	/* Find the node, or create a preliminary entry for it */
	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
		/* Peer node is not a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *  Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/*  The peer node has rebooted.
		 *  Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		/* All bearer slots already occupied */
		if (n->link_cnt == 2)
			goto exit;

		/* Interface name is the part after the ':' in the bearer name */
		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		/* First link on this node: arm the keepalive timer */
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	/* Reset outside the lock if the link needs to restart */
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}
   1310
   1311void tipc_node_delete_links(struct net *net, int bearer_id)
   1312{
   1313	struct tipc_net *tn = net_generic(net, tipc_net_id);
   1314	struct tipc_node *n;
   1315
   1316	rcu_read_lock();
   1317	list_for_each_entry_rcu(n, &tn->node_list, list) {
   1318		tipc_node_link_down(n, bearer_id, true);
   1319	}
   1320	rcu_read_unlock();
   1321}
   1322
   1323static void tipc_node_reset_links(struct tipc_node *n)
   1324{
   1325	int i;
   1326
   1327	pr_warn("Resetting all links to %x\n", n->addr);
   1328
   1329	trace_tipc_node_reset_links(n, true, " ");
   1330	for (i = 0; i < MAX_BEARERS; i++) {
   1331		tipc_node_link_down(n, i, false);
   1332	}
   1333}
   1334
/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 *
 * Transitions are driven by self/peer contact events and synch/failover
 * events. Any event not listed for the current state is logged as illegal
 * and leaves the state unchanged.
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	/* No contact in either direction yet */
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	/* Fully established contact; synch/failover may begin from here */
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	/* We are down, waiting for the peer to notice */
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	/* We see the peer; waiting for the peer's confirmation */
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	/* Peer sees us; waiting for our own confirmation */
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	/* Peer is down, waiting for our side to finish leaving */
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	/* Link failover in progress */
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	/* Link synchronization in progress; may escalate to failover */
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	/* Illegal events are logged and traced but do not change state */
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
   1513
/**
 * node_lost_contact - handle definitive loss of contact with a peer node
 * @n: node contact was lost with
 * @inputq: queue to put connection-abort messages for local sockets on
 *
 * Cleans up broadcast state, aborts any ongoing failover, and notifies
 * all local sockets that were connected to the lost node.
 */
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	/* Schedule eventual cleanup of this node's record */
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);
	skb_queue_purge(&n->bc_entry.namedq);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}
   1554
   1555/**
   1556 * tipc_node_get_linkname - get the name of a link
   1557 *
   1558 * @net: the applicable net namespace
   1559 * @bearer_id: id of the bearer
   1560 * @addr: peer node address
   1561 * @linkname: link name output buffer
   1562 * @len: size of @linkname output buffer
   1563 *
   1564 * Return: 0 on success
   1565 */
   1566int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
   1567			   char *linkname, size_t len)
   1568{
   1569	struct tipc_link *link;
   1570	int err = -EINVAL;
   1571	struct tipc_node *node = tipc_node_find(net, addr);
   1572
   1573	if (!node)
   1574		return err;
   1575
   1576	if (bearer_id >= MAX_BEARERS)
   1577		goto exit;
   1578
   1579	tipc_node_read_lock(node);
   1580	link = node->links[bearer_id].link;
   1581	if (link) {
   1582		strncpy(linkname, tipc_link_name(link), len);
   1583		err = 0;
   1584	}
   1585	tipc_node_read_unlock(node);
   1586exit:
   1587	tipc_node_put(node);
   1588	return err;
   1589}
   1590
/* Caller should hold node lock for the passed node
 *
 * Appends one TIPC_NLA_NODE attribute block (address + optional "up" flag)
 * to the netlink dump message. Returns 0 on success or -EMSGSIZE if the
 * message buffer is full, after cancelling the partial output.
 */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	/* Begin a new multipart genetlink message for this node */
	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
   1624
/* tipc_lxc_xmit - deliver a message list directly into a peer's local
 * (container) namespace, bypassing the bearers.
 *
 * Only data and connection-management messages are delivered; link-level
 * protocol messages are silently dropped since no real link is involved.
 */
static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			/* Re-arm the queue lock before handing the list over
			 * to the receive path in the peer namespace
			 */
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		/* Reassemble first, then deliver as multicast */
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		/* Link-level traffic is meaningless without a real link */
		return;
	default:
		return;
	}
}
   1675
   1676/**
   1677 * tipc_node_xmit() - general link level function for message sending
   1678 * @net: the applicable net namespace
   1679 * @list: chain of buffers containing message
   1680 * @dnode: address of destination node
   1681 * @selector: a number used for deterministic link selection
   1682 * Consumes the buffer chain.
   1683 * Return: 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
   1684 */
   1685int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
   1686		   u32 dnode, int selector)
   1687{
   1688	struct tipc_link_entry *le = NULL;
   1689	struct tipc_node *n;
   1690	struct sk_buff_head xmitq;
   1691	bool node_up = false;
   1692	int bearer_id;
   1693	int rc;
   1694
   1695	if (in_own_node(net, dnode)) {
   1696		tipc_loopback_trace(net, list);
   1697		spin_lock_init(&list->lock);
   1698		tipc_sk_rcv(net, list);
   1699		return 0;
   1700	}
   1701
   1702	n = tipc_node_find(net, dnode);
   1703	if (unlikely(!n)) {
   1704		__skb_queue_purge(list);
   1705		return -EHOSTUNREACH;
   1706	}
   1707
   1708	tipc_node_read_lock(n);
   1709	node_up = node_is_up(n);
   1710	if (node_up && n->peer_net && check_net(n->peer_net)) {
   1711		/* xmit inner linux container */
   1712		tipc_lxc_xmit(n->peer_net, list);
   1713		if (likely(skb_queue_empty(list))) {
   1714			tipc_node_read_unlock(n);
   1715			tipc_node_put(n);
   1716			return 0;
   1717		}
   1718	}
   1719
   1720	bearer_id = n->active_links[selector & 1];
   1721	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
   1722		tipc_node_read_unlock(n);
   1723		tipc_node_put(n);
   1724		__skb_queue_purge(list);
   1725		return -EHOSTUNREACH;
   1726	}
   1727
   1728	__skb_queue_head_init(&xmitq);
   1729	le = &n->links[bearer_id];
   1730	spin_lock_bh(&le->lock);
   1731	rc = tipc_link_xmit(le->link, list, &xmitq);
   1732	spin_unlock_bh(&le->lock);
   1733	tipc_node_read_unlock(n);
   1734
   1735	if (unlikely(rc == -ENOBUFS))
   1736		tipc_node_link_down(n, bearer_id, false);
   1737	else
   1738		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
   1739
   1740	tipc_node_put(n);
   1741
   1742	return rc;
   1743}
   1744
   1745/* tipc_node_xmit_skb(): send single buffer to destination
   1746 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
   1747 * messages, which will not be rejected
   1748 * The only exception is datagram messages rerouted after secondary
   1749 * lookup, which are rare and safe to dispose of anyway.
   1750 */
   1751int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
   1752		       u32 selector)
   1753{
   1754	struct sk_buff_head head;
   1755
   1756	__skb_queue_head_init(&head);
   1757	__skb_queue_tail(&head, skb);
   1758	tipc_node_xmit(net, &head, dnode, selector);
   1759	return 0;
   1760}
   1761
   1762/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
   1763 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
   1764 */
   1765int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
   1766{
   1767	struct sk_buff *skb;
   1768	u32 selector, dnode;
   1769
   1770	while ((skb = __skb_dequeue(xmitq))) {
   1771		selector = msg_origport(buf_msg(skb));
   1772		dnode = msg_destnode(buf_msg(skb));
   1773		tipc_node_xmit_skb(net, skb, dnode, selector);
   1774	}
   1775	return 0;
   1776}
   1777
   1778void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
   1779{
   1780	struct sk_buff_head xmitq;
   1781	struct sk_buff *txskb;
   1782	struct tipc_node *n;
   1783	u16 dummy;
   1784	u32 dst;
   1785
   1786	/* Use broadcast if all nodes support it */
   1787	if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
   1788		__skb_queue_head_init(&xmitq);
   1789		__skb_queue_tail(&xmitq, skb);
   1790		tipc_bcast_xmit(net, &xmitq, &dummy);
   1791		return;
   1792	}
   1793
   1794	/* Otherwise use legacy replicast method */
   1795	rcu_read_lock();
   1796	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
   1797		dst = n->addr;
   1798		if (in_own_node(net, dst))
   1799			continue;
   1800		if (!node_is_up(n))
   1801			continue;
   1802		txskb = pskb_copy(skb, GFP_ATOMIC);
   1803		if (!txskb)
   1804			break;
   1805		msg_set_destnode(buf_msg(txskb), dst);
   1806		tipc_node_xmit_skb(net, txskb, dst, 0);
   1807	}
   1808	rcu_read_unlock();
   1809	kfree_skb(skb);
   1810}
   1811
/* tipc_node_mcast_rcv - move buffered multicast messages from inputq1 to
 * the arrival queue and deliver them to local sockets.
 */
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	/* NOTE: lock nesting order is inputq2 -> inputq1; do not change */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}
   1824
/**
 * tipc_node_bc_sync_rcv - process broadcast link state message from a peer
 * @n: node the message came from
 * @hdr: message header
 * @bearer_id: bearer the message arrived on
 * @xmitq: queue for messages to be xmited in response
 */
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);

	/* Broadcast link failure => reset all links towards this peer */
	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}
   1852
   1853/**
   1854 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
   1855 * @net: the applicable net namespace
   1856 * @skb: TIPC packet
   1857 * @bearer_id: id of bearer message arrived on
   1858 *
   1859 * Invoked with no locks held.
   1860 */
   1861static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
   1862{
   1863	int rc;
   1864	struct sk_buff_head xmitq;
   1865	struct tipc_bclink_entry *be;
   1866	struct tipc_link_entry *le;
   1867	struct tipc_msg *hdr = buf_msg(skb);
   1868	int usr = msg_user(hdr);
   1869	u32 dnode = msg_destnode(hdr);
   1870	struct tipc_node *n;
   1871
   1872	__skb_queue_head_init(&xmitq);
   1873
   1874	/* If NACK for other node, let rcv link for that node peek into it */
   1875	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
   1876		n = tipc_node_find(net, dnode);
   1877	else
   1878		n = tipc_node_find(net, msg_prevnode(hdr));
   1879	if (!n) {
   1880		kfree_skb(skb);
   1881		return;
   1882	}
   1883	be = &n->bc_entry;
   1884	le = &n->links[bearer_id];
   1885
   1886	rc = tipc_bcast_rcv(net, be->link, skb);
   1887
   1888	/* Broadcast ACKs are sent on a unicast link */
   1889	if (rc & TIPC_LINK_SND_STATE) {
   1890		tipc_node_read_lock(n);
   1891		tipc_link_build_state_msg(le->link, &xmitq);
   1892		tipc_node_read_unlock(n);
   1893	}
   1894
   1895	if (!skb_queue_empty(&xmitq))
   1896		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
   1897
   1898	if (!skb_queue_empty(&be->inputq1))
   1899		tipc_node_mcast_rcv(n);
   1900
   1901	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
   1902	if (!skb_queue_empty(&n->bc_entry.namedq))
   1903		tipc_named_rcv(net, &n->bc_entry.namedq,
   1904			       &n->bc_entry.named_rcv_nxt,
   1905			       &n->bc_entry.named_open);
   1906
   1907	/* If reassembly or retransmission failure => reset all links to peer */
   1908	if (rc & TIPC_LINK_DOWN_EVT)
   1909		tipc_node_reset_links(n);
   1910
   1911	tipc_node_put(n);
   1912}
   1913
   1914/**
   1915 * tipc_node_check_state - check and if necessary update node state
   1916 * @n: target tipc_node
   1917 * @skb: TIPC packet
   1918 * @bearer_id: identity of bearer delivering the packet
   1919 * @xmitq: queue for messages to be xmited on
   1920 * Return: true if state and msg are ok, otherwise false
   1921 */
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	int mtyp = msg_type(hdr);
	u16 oseqno = msg_seqno(hdr);
	u16 exp_pkts = msg_msgcnt(hdr);
	u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
	int state = n->state;
	struct tipc_link *l, *tnl, *pl = NULL;
	struct tipc_media_addr *maddr;
	int pb_id;

	if (trace_tipc_node_check_state_enabled()) {
		trace_tipc_skb_dump(skb, false, "skb for node state check");
		trace_tipc_node_check_state(n, true, " ");
	}
	l = n->links[bearer_id].link;
	if (!l)
		return false;
	rcv_nxt = tipc_link_rcv_nxt(l);


	/* Fast path: link fully up and no tunnel traffic to examine */
	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
		return true;

	/* Find parallel link, if any */
	for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
		if ((pb_id != bearer_id) && n->links[pb_id].link) {
			pl = n->links[pb_id].link;
			break;
		}
	}

	/* Sanity-check the protocol message before acting on it */
	if (!tipc_link_validate_msg(l, hdr)) {
		trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
		return false;
	}

	/* Check and update node accessibility if applicable */
	if (state == SELF_UP_PEER_COMING) {
		if (!tipc_link_is_up(l))
			return true;
		if (!msg_peer_link_is_up(hdr))
			return true;
		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
	}

	if (state == SELF_DOWN_PEER_LEAVING) {
		if (msg_peer_node_is_up(hdr))
			return false;
		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		return true;
	}

	if (state == SELF_LEAVING_PEER_DOWN)
		return false;

	/* Ignore duplicate packets */
	if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
		return true;

	/* Initiate or update failover mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
		/* Last tunneled packet carries seqno oseqno + exp_pkts - 1 */
		syncpt = oseqno + exp_pkts - 1;
		if (pl && !tipc_link_is_reset(pl)) {
			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
			trace_tipc_node_link_down(n, true,
						  "node link down <- failover!");
			/* Salvage undelivered input from the failed link */
			tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
							tipc_link_inputq(l));
		}

		/* If parallel link was already down, and this happened before
		 * the tunnel link came up, node failover was never started.
		 * Ensure that a FAILOVER_MSG is sent to get peer out of
		 * NODE_FAILINGOVER state, also this node must accept
		 * TUNNEL_MSGs from peer.
		 */
		if (n->state != NODE_FAILINGOVER)
			tipc_node_link_failover(n, pl, l, xmitq);

		/* If pkts arrive out of order, use lowest calculated syncpt */
		if (less(syncpt, n->sync_point))
			n->sync_point = syncpt;
	}

	/* Open parallel link when tunnel link reaches synch point */
	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
		if (!more(rcv_nxt, n->sync_point))
			return true;
		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
		if (pl)
			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
		return true;
	}

	/* No syncing needed if only one link */
	if (!pl || !tipc_link_is_up(pl))
		return true;

	/* Initiate synch mode if applicable */
	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
		if (n->capabilities & TIPC_TUNNEL_ENHANCED)
			syncpt = msg_syncpt(hdr);
		else
			syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
		if (!tipc_link_is_up(l))
			__tipc_node_link_up(n, bearer_id, xmitq);
		if (n->state == SELF_UP_PEER_UP) {
			n->sync_point = syncpt;
			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
		}
	}

	/* Open tunnel link when parallel link reaches synch point */
	if (n->state == NODE_SYNCHING) {
		/* After this, 'tnl' is the still-synching link, 'pl' its peer */
		if (tipc_link_is_synching(l)) {
			tnl = l;
		} else {
			tnl = pl;
			pl = l;
		}
		/* Packets still sitting in the input queue have been received
		 * but not delivered; subtract them to get the delivery point.
		 */
		inputq_len = skb_queue_len(tipc_link_inputq(pl));
		dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
		if (more(dlv_nxt, n->sync_point)) {
			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
			return true;
		}
		if (l == pl)
			return true;
		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
			return true;
		if (usr == LINK_PROTOCOL)
			return true;
		return false;
	}
	return true;
}
   2065
   2066/**
   2067 * tipc_rcv - process TIPC packets/messages arriving from off-node
   2068 * @net: the applicable net namespace
   2069 * @skb: TIPC packet
   2070 * @b: pointer to bearer message arrived on
   2071 *
   2072 * Invoked with no locks held. Bearer pointer must point to a valid bearer
   2073 * structure (i.e. cannot be NULL), but bearer can be inactive.
   2074 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
	struct sk_buff_head xmitq;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr;
	struct tipc_node *n;
	int bearer_id = b->identity;
	u32 self = tipc_own_addr(net);
	int usr, rc = 0;
	u16 bc_ack;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_ehdr *ehdr;

	/* Check if message must be decrypted first */
	if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
		goto rcv;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (likely(ehdr->user != LINK_CONFIG)) {
		n = tipc_node_find(net, ntohl(ehdr->addr));
		if (unlikely(!n))
			goto discard;
	} else {
		n = tipc_node_find_by_id(net, ehdr->id);
	}
	/* NOTE(review): the node reference taken above is presumably
	 * released by the crypto receive path - confirm in tipc_crypto_rcv().
	 * On success skb is replaced by the decrypted buffer; on failure it
	 * is set to NULL and we are done.
	 */
	tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
	if (!skb)
		return;

rcv:
#endif
	/* Ensure message is well-formed before touching the header */
	if (unlikely(!tipc_msg_validate(&skb)))
		goto discard;
	__skb_queue_head_init(&xmitq);
	hdr = buf_msg(skb);
	usr = msg_user(hdr);
	bc_ack = msg_bcast_ack(hdr);

	/* Handle arrival of discovery or broadcast packet */
	if (unlikely(msg_non_seq(hdr))) {
		if (unlikely(usr == LINK_CONFIG))
			return tipc_disc_rcv(net, skb, b);
		else
			return tipc_node_bc_rcv(net, skb, bearer_id);
	}

	/* Discard unicast link messages destined for another node */
	if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
		goto discard;

	/* Locate neighboring node that sent packet */
	n = tipc_node_find(net, msg_prevnode(hdr));
	if (unlikely(!n))
		goto discard;
	le = &n->links[bearer_id];

	/* Ensure broadcast reception is in synch with peer's send state */
	if (unlikely(usr == LINK_PROTOCOL)) {
		if (unlikely(skb_linearize(skb))) {
			/* Linearization failed: drop our node ref and packet */
			tipc_node_put(n);
			goto discard;
		}
		/* skb data may have moved; re-derive the header pointer */
		hdr = buf_msg(skb);
		tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
	} else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
		tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
	}

	/* Receive packet directly if conditions permit */
	tipc_node_read_lock(n);
	if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
		spin_lock_bh(&le->lock);
		if (le->link) {
			rc = tipc_link_rcv(le->link, skb, &xmitq);
			skb = NULL;	/* consumed by the link layer */
		}
		spin_unlock_bh(&le->lock);
	}
	tipc_node_read_unlock(n);

	/* Check/update node state before receiving */
	if (unlikely(skb)) {
		if (unlikely(skb_linearize(skb)))
			goto out_node_put;
		/* Write lock: tipc_node_check_state() may alter node FSM
		 * state and take links up/down.
		 */
		tipc_node_write_lock(n);
		if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
			if (le->link) {
				rc = tipc_link_rcv(le->link, skb, &xmitq);
				skb = NULL;
			}
		}
		tipc_node_write_unlock(n);
	}

	/* Act on link state events reported by tipc_link_rcv() */
	if (unlikely(rc & TIPC_LINK_UP_EVT))
		tipc_node_link_up(n, bearer_id, &xmitq);

	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
		tipc_node_link_down(n, bearer_id, false);

	if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
		tipc_named_rcv(net, &n->bc_entry.namedq,
			       &n->bc_entry.named_rcv_nxt,
			       &n->bc_entry.named_open);

	if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
		tipc_node_mcast_rcv(n);

	/* Deliver payload to local sockets */
	if (!skb_queue_empty(&le->inputq))
		tipc_sk_rcv(net, &le->inputq);

	/* Send any protocol messages accumulated above */
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

out_node_put:
	tipc_node_put(n);
discard:
	/* skb may be NULL here if it was consumed; kfree_skb(NULL) is a no-op */
	kfree_skb(skb);
}
   2195
/* tipc_node_apply_property - propagate a changed bearer property (tolerance
 * or MTU) to the corresponding link of every known peer node.
 * @net: the applicable net namespace
 * @b: bearer whose property changed
 * @prop: which property (TIPC_NLA_PROP_TOL or TIPC_NLA_PROP_MTU)
 */
void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
			      int prop)
{
	struct tipc_net *tn = tipc_net(net);
	int bearer_id = b->identity;
	struct sk_buff_head xmitq;
	struct tipc_link_entry *e;
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	rcu_read_lock();

	list_for_each_entry_rcu(n, &tn->node_list, list) {
		/* Write lock: link parameters are being modified */
		tipc_node_write_lock(n);
		e = &n->links[bearer_id];
		if (e->link) {
			if (prop == TIPC_NLA_PROP_TOL)
				tipc_link_set_tolerance(e->link, b->tolerance,
							&xmitq);
			else if (prop == TIPC_NLA_PROP_MTU)
				tipc_link_set_mtu(e->link, b->mtu);

			/* Update MTU for node link entry */
			e->mtu = tipc_link_mss(e->link);
		}

		tipc_node_write_unlock(n);
		/* Flush per-node protocol messages; xmitq is empty again
		 * before the next iteration.
		 */
		tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
	}

	rcu_read_unlock();
}
   2229
   2230int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
   2231{
   2232	struct net *net = sock_net(skb->sk);
   2233	struct tipc_net *tn = net_generic(net, tipc_net_id);
   2234	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
   2235	struct tipc_node *peer, *temp_node;
   2236	u8 node_id[NODE_ID_LEN];
   2237	u64 *w0 = (u64 *)&node_id[0];
   2238	u64 *w1 = (u64 *)&node_id[8];
   2239	u32 addr;
   2240	int err;
   2241
   2242	/* We identify the peer by its net */
   2243	if (!info->attrs[TIPC_NLA_NET])
   2244		return -EINVAL;
   2245
   2246	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
   2247					  info->attrs[TIPC_NLA_NET],
   2248					  tipc_nl_net_policy, info->extack);
   2249	if (err)
   2250		return err;
   2251
   2252	/* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
   2253	 * mutually exclusive cases
   2254	 */
   2255	if (attrs[TIPC_NLA_NET_ADDR]) {
   2256		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
   2257		if (!addr)
   2258			return -EINVAL;
   2259	}
   2260
   2261	if (attrs[TIPC_NLA_NET_NODEID]) {
   2262		if (!attrs[TIPC_NLA_NET_NODEID_W1])
   2263			return -EINVAL;
   2264		*w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
   2265		*w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
   2266		addr = hash128to32(node_id);
   2267	}
   2268
   2269	if (in_own_node(net, addr))
   2270		return -ENOTSUPP;
   2271
   2272	spin_lock_bh(&tn->node_list_lock);
   2273	peer = tipc_node_find(net, addr);
   2274	if (!peer) {
   2275		spin_unlock_bh(&tn->node_list_lock);
   2276		return -ENXIO;
   2277	}
   2278
   2279	tipc_node_write_lock(peer);
   2280	if (peer->state != SELF_DOWN_PEER_DOWN &&
   2281	    peer->state != SELF_DOWN_PEER_LEAVING) {
   2282		tipc_node_write_unlock(peer);
   2283		err = -EBUSY;
   2284		goto err_out;
   2285	}
   2286
   2287	tipc_node_clear_links(peer);
   2288	tipc_node_write_unlock(peer);
   2289	tipc_node_delete(peer);
   2290
   2291	/* Calculate cluster capabilities */
   2292	tn->capabilities = TIPC_NODE_CAPABILITIES;
   2293	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
   2294		tn->capabilities &= temp_node->capabilities;
   2295	}
   2296	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
   2297	err = 0;
   2298err_out:
   2299	tipc_node_put(peer);
   2300	spin_unlock_bh(&tn->node_list_lock);
   2301
   2302	return err;
   2303}
   2304
/* tipc_nl_node_dump - netlink dump handler listing all known nodes.
 * Resume state across invocations: cb->args[0] = done flag,
 * cb->args[1] = address of the node where the previous pass stopped.
 */
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	int done = cb->args[0];
	int last_addr = cb->args[1];
	struct tipc_node *node;
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (last_addr) {
		/* Verify the resume point still exists before scanning */
		node = tipc_node_find(net, last_addr);
		if (!node) {
			rcu_read_unlock();
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the NLMSG_DONE message having
			 * the NLM_F_DUMP_INTR flag set if the node state
			 * changed while we released the lock.
			 */
			cb->prev_seq = 1;
			return -EPIPE;
		}
		tipc_node_put(node);
	}

	list_for_each_entry_rcu(node, &tn->node_list, list) {
		/* Preliminary nodes are not yet fully discovered; skip them */
		if (node->preliminary)
			continue;
		if (last_addr) {
			/* Skip entries until we pass the resume point */
			if (node->addr == last_addr)
				last_addr = 0;
			else
				continue;
		}

		tipc_node_read_lock(node);
		err = __tipc_nl_add_node_links(&msg, node);
		if (err) {
			/* Message full: remember where to resume */
			last_addr = node->addr;
			tipc_node_read_unlock(node);
			goto out;
		}

		tipc_node_read_unlock(node);
	}
	done = 1;
out:
	cb->args[0] = done;
	cb->args[1] = last_addr;
	rcu_read_unlock();

	return skb->len;
}
   2368
   2369/* tipc_node_find_by_name - locate owner node of link by link's name
   2370 * @net: the applicable net namespace
   2371 * @name: pointer to link name string
   2372 * @bearer_id: pointer to index in 'node->links' array where the link was found.
   2373 *
   2374 * Returns pointer to node owning the link, or 0 if no matching link is found.
   2375 */
   2376static struct tipc_node *tipc_node_find_by_name(struct net *net,
   2377						const char *link_name,
   2378						unsigned int *bearer_id)
   2379{
   2380	struct tipc_net *tn = net_generic(net, tipc_net_id);
   2381	struct tipc_link *l;
   2382	struct tipc_node *n;
   2383	struct tipc_node *found_node = NULL;
   2384	int i;
   2385
   2386	*bearer_id = 0;
   2387	rcu_read_lock();
   2388	list_for_each_entry_rcu(n, &tn->node_list, list) {
   2389		tipc_node_read_lock(n);
   2390		for (i = 0; i < MAX_BEARERS; i++) {
   2391			l = n->links[i].link;
   2392			if (l && !strcmp(tipc_link_name(l), link_name)) {
   2393				*bearer_id = i;
   2394				found_node = n;
   2395				break;
   2396			}
   2397		}
   2398		tipc_node_read_unlock(n);
   2399		if (found_node)
   2400			break;
   2401	}
   2402	rcu_read_unlock();
   2403
   2404	return found_node;
   2405}
   2406
/* tipc_nl_node_set_link - netlink handler setting link properties
 * (tolerance, priority, window) on the link identified by name.
 * The broadcast link is delegated to tipc_nl_bc_link_set().
 */
int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct sk_buff_head xmitq;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	__skb_queue_head_init(&xmitq);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* The broadcast link has its own setter */
	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_node_find_by_name(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_read_lock(node);

	/* Link may have disappeared between lookup and lock */
	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
		if (err) {
			res = err;
			goto out;
		}

		/* Apply each supplied property; setters queue any protocol
		 * messages they need to send on xmitq.
		 */
		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			tipc_link_set_tolerance(link, tol, &xmitq);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			tipc_link_set_prio(link, prio, &xmitq);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 max_win;

			max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link,
						   tipc_link_min_win(link),
						   max_win);
		}
	}

out:
	tipc_node_read_unlock(node);
	/* Flush queued protocol messages even on partial failure */
	tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
			 NULL);
	return res;
}
   2487
/* tipc_nl_node_get_link - netlink handler replying with the attributes of
 * a single link, identified by name (unicast or broadcast).
 */
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* Reply skb; freed on every error path below via err_free */
	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
		if (err)
			goto err_free;
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_node_find_by_name(net, name, &bearer_id);
		if (!node) {
			err = -EINVAL;
			goto err_free;
		}

		/* Re-check the link under the node lock; it may be gone */
		tipc_node_read_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_read_unlock(node);
			err = -EINVAL;
			goto err_free;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_read_unlock(node);
		if (err)
			goto err_free;
	}

	/* Ownership of msg.skb passes to the genetlink layer here */
	return genlmsg_reply(msg.skb, info);

err_free:
	nlmsg_free(msg.skb);
	return err;
}
   2552
/* tipc_nl_node_reset_link_stats - netlink handler zeroing the statistics
 * of a link identified by name. Handles three cases: the broadcast send
 * link, a per-node broadcast receive link, and an ordinary unicast link.
 */
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = tipc_net(net);
	struct tipc_link_entry *le;

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
					  info->attrs[TIPC_NLA_LINK],
					  tipc_nl_link_policy, info->extack);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	/* Preset -EINVAL: the bc-receive-link loop below returns this
	 * unchanged if no link of that name is found.
	 */
	err = -EINVAL;
	if (!strcmp(link_name, tipc_bclink_name)) {
		/* Exact match: the broadcast send link */
		err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
		if (err)
			return err;
		return 0;
	} else if (strstr(link_name, tipc_bclink_name)) {
		/* Substring match: a per-peer broadcast receive link */
		rcu_read_lock();
		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			link = node->bc_entry.link;
			if (link && !strcmp(link_name, tipc_link_name(link))) {
				err = tipc_bclink_reset_stats(net, link);
				tipc_node_read_unlock(node);
				break;
			}
			tipc_node_read_unlock(node);
		}
		rcu_read_unlock();
		return err;
	}

	/* Ordinary unicast link */
	node = tipc_node_find_by_name(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	le = &node->links[bearer_id];
	/* Node read lock plus per-entry spinlock guard the link pointer */
	tipc_node_read_lock(node);
	spin_lock_bh(&le->lock);
	link = node->links[bearer_id].link;
	if (!link) {
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(node);
		return -EINVAL;
	}
	tipc_link_reset_stats(link);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(node);
	return 0;
}
   2619
/* __tipc_nl_add_node_links - append one node's link attributes to a dump
 * message, optionally including the node's broadcast receive link.
 * @prev_link: in/out resume cursor; on a full message it holds the bearer
 * index (or MAX_BEARERS for the bc link) to retry next time, and is reset
 * to 0 once the node is fully dumped.
 *
 * Caller should hold node lock.
 */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link,
				    bool bc_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		/* Record position before each attempt so a failed add
		 * is retried on the next dump pass.
		 */
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}

	if (bc_link) {
		/* i == MAX_BEARERS here, marking the bc link as resume point */
		*prev_link = i;
		err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
		if (err)
			return err;
	}

	/* Node complete: restart from bearer 0 for the next node */
	*prev_link = 0;

	return 0;
}
   2651
/* tipc_nl_node_dump_link - netlink dump handler listing all links.
 * Resume state: cb->args[0] = last node address, cb->args[1] = last bearer
 * index within that node, cb->args[2] = done flag, cb->args[3] = whether
 * broadcast receive links were requested.
 */
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
	struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	bool bc_link = cb->args[3];
	int err;

	if (done)
		return 0;

	/* Only parse request attributes on the first pass */
	if (!prev_node) {
		/* Check if broadcast-receiver links dumping is needed */
		if (attrs && attrs[TIPC_NLA_LINK]) {
			err = nla_parse_nested_deprecated(link,
							  TIPC_NLA_LINK_MAX,
							  attrs[TIPC_NLA_LINK],
							  tipc_nl_link_policy,
							  NULL);
			if (unlikely(err))
				return err;
			if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
				return -EINVAL;
			bc_link = true;
		}
	}

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		/* Resuming: verify the previous node still exists */
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		/* Continue from the node after the resume point */
		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		/* First pass: the broadcast send link goes out first */
		err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_read_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link, bc_link);
			tipc_node_read_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;
	cb->args[3] = bc_link;

	return skb->len;
}
   2742
   2743int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
   2744{
   2745	struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
   2746	struct net *net = sock_net(skb->sk);
   2747	int err;
   2748
   2749	if (!info->attrs[TIPC_NLA_MON])
   2750		return -EINVAL;
   2751
   2752	err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
   2753					  info->attrs[TIPC_NLA_MON],
   2754					  tipc_nl_monitor_policy,
   2755					  info->extack);
   2756	if (err)
   2757		return err;
   2758
   2759	if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
   2760		u32 val;
   2761
   2762		val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
   2763		err = tipc_nl_monitor_set_threshold(net, val);
   2764		if (err)
   2765			return err;
   2766	}
   2767
   2768	return 0;
   2769}
   2770
/* __tipc_nl_add_monitor_prop - build a TIPC_NL_MON_GET reply carrying the
 * current monitor activation threshold into @msg.
 *
 * Return: 0 on success, -EMSGSIZE if the message buffer is too small
 * (all partially written data is cancelled).
 */
static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
{
	struct nlattr *attrs;
	void *hdr;
	u32 val;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  0, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	val = tipc_nl_monitor_get_threshold(net);

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

	/* Unwind in reverse order of construction on overflow */
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
   2803
   2804int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
   2805{
   2806	struct net *net = sock_net(skb->sk);
   2807	struct tipc_nl_msg msg;
   2808	int err;
   2809
   2810	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
   2811	if (!msg.skb)
   2812		return -ENOMEM;
   2813	msg.portid = info->snd_portid;
   2814	msg.seq = info->snd_seq;
   2815
   2816	err = __tipc_nl_add_monitor_prop(net, &msg);
   2817	if (err) {
   2818		nlmsg_free(msg.skb);
   2819		return err;
   2820	}
   2821
   2822	return genlmsg_reply(msg.skb, info);
   2823}
   2824
   2825int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
   2826{
   2827	struct net *net = sock_net(skb->sk);
   2828	u32 prev_bearer = cb->args[0];
   2829	struct tipc_nl_msg msg;
   2830	int bearer_id;
   2831	int err;
   2832
   2833	if (prev_bearer == MAX_BEARERS)
   2834		return 0;
   2835
   2836	msg.skb = skb;
   2837	msg.portid = NETLINK_CB(cb->skb).portid;
   2838	msg.seq = cb->nlh->nlmsg_seq;
   2839
   2840	rtnl_lock();
   2841	for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
   2842		err = __tipc_nl_add_monitor(net, &msg, bearer_id);
   2843		if (err)
   2844			break;
   2845	}
   2846	rtnl_unlock();
   2847	cb->args[0] = bearer_id;
   2848
   2849	return skb->len;
   2850}
   2851
/* tipc_nl_node_dump_monitor_peer - netlink dump handler listing the peers
 * monitored on one bearer. Resume state: cb->args[0] = done flag,
 * cb->args[1] = last dumped peer, cb->args[2] = bearer id (parsed from the
 * request on the first pass only).
 */
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	u32 prev_node = cb->args[1];
	u32 bearer_id = cb->args[2];
	int done = cb->args[0];
	struct tipc_nl_msg msg;
	int err;

	if (!prev_node) {
		/* First pass: extract and validate the bearer reference */
		struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
		struct nlattr *mon[TIPC_NLA_MON_MAX + 1];

		if (!attrs[TIPC_NLA_MON])
			return -EINVAL;

		err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
						  attrs[TIPC_NLA_MON],
						  tipc_nl_monitor_policy,
						  NULL);
		if (err)
			return err;

		if (!mon[TIPC_NLA_MON_REF])
			return -EINVAL;

		bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);

		if (bearer_id >= MAX_BEARERS)
			return -EINVAL;
	}

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	/* prev_node is updated in place to the last peer dumped */
	err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
	if (!err)
		done = 1;

	rtnl_unlock();
	cb->args[0] = done;
	cb->args[1] = prev_node;
	cb->args[2] = bearer_id;

	return skb->len;
}
   2904
   2905#ifdef CONFIG_TIPC_CRYPTO
   2906static int tipc_nl_retrieve_key(struct nlattr **attrs,
   2907				struct tipc_aead_key **pkey)
   2908{
   2909	struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
   2910	struct tipc_aead_key *key;
   2911
   2912	if (!attr)
   2913		return -ENODATA;
   2914
   2915	if (nla_len(attr) < sizeof(*key))
   2916		return -EINVAL;
   2917	key = (struct tipc_aead_key *)nla_data(attr);
   2918	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
   2919	    nla_len(attr) < tipc_aead_key_size(key))
   2920		return -EINVAL;
   2921
   2922	*pkey = key;
   2923	return 0;
   2924}
   2925
   2926static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
   2927{
   2928	struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
   2929
   2930	if (!attr)
   2931		return -ENODATA;
   2932
   2933	if (nla_len(attr) < TIPC_NODEID_LEN)
   2934		return -EINVAL;
   2935
   2936	*node_id = (u8 *)nla_data(attr);
   2937	return 0;
   2938}
   2939
   2940static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
   2941{
   2942	struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
   2943
   2944	if (!attr)
   2945		return -ENODATA;
   2946
   2947	*intv = nla_get_u32(attr);
   2948	return 0;
   2949}
   2950
/* __tipc_nl_node_set_key - handle a TIPC_NL_KEY_SET request (rtnl held)
 * @skb: netlink request skb (used only for its owning namespace)
 * @info: generic netlink request info carrying the parsed attributes
 *
 * Attaches a new AEAD key to the local TX crypto instance (cluster or
 * master key) or to a specific peer's RX instance (per-node key), and/or
 * schedules periodic TX rekeying when TIPC_NLA_NODE_REKEYING is given.
 * Returns 0 on success or a negative errno.
 */
static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
	struct net *net = sock_net(skb->sk);
	/* Default target is the local TX instance; switched to a peer's
	 * RX instance below for a per-node key.
	 */
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
	struct tipc_node *n = NULL;
	struct tipc_aead_key *ukey;
	bool rekeying = true, master_key = false;
	u8 *id, *own_id, mode;
	u32 intv = 0;
	int rc = 0;

	if (!info->attrs[TIPC_NLA_NODE])
		return -EINVAL;

	rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
			      info->attrs[TIPC_NLA_NODE],
			      tipc_nl_node_policy, info->extack);
	if (rc)
		return rc;

	/* A local node identity is a precondition for any key handling */
	own_id = tipc_own_id(net);
	if (!own_id) {
		GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
		return -EPERM;
	}

	/* Absence of the rekeying attribute means "leave/disable rekeying",
	 * not an error.
	 */
	rc = tipc_nl_retrieve_rekeying(attrs, &intv);
	if (rc == -ENODATA)
		rekeying = false;

	/* A request may carry only a rekeying interval and no key at all;
	 * in that case jump straight to (re)scheduling below.
	 */
	rc = tipc_nl_retrieve_key(attrs, &ukey);
	if (rc == -ENODATA && rekeying)
		goto rekeying;
	else if (rc)
		return rc;

	rc = tipc_aead_key_validate(ukey, info);
	if (rc)
		return rc;

	rc = tipc_nl_retrieve_nodeid(attrs, &id);
	switch (rc) {
	case -ENODATA:
		/* No node id: this is a cluster-wide key (or the special
		 * master key if so flagged).
		 */
		mode = CLUSTER_KEY;
		master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
		break;
	case 0:
		mode = PER_NODE_KEY;
		if (memcmp(id, own_id, NODE_ID_LEN)) {
			/* Key is for a peer: find it or pre-create a
			 * preliminary node so its RX instance exists.
			 */
			n = tipc_node_find_by_id(net, id) ?:
				tipc_node_create(net, 0, id, 0xffffu, 0, true);
			if (unlikely(!n))
				return -ENOMEM;
			c = n->crypto_rx;
		}
		break;
	default:
		return rc;
	}

	/* Initiate the TX/RX key */
	rc = tipc_crypto_key_init(c, ukey, mode, master_key);
	/* Drop the reference taken by find/create above */
	if (n)
		tipc_node_put(n);

	if (unlikely(rc < 0)) {
		GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
		return rc;
	} else if (c == tx) {
		/* Distribute TX key but not master one */
		if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
			GENL_SET_ERR_MSG(info, "failed to replicate new key");
rekeying:
		/* Schedule TX rekeying if needed */
		tipc_crypto_rekeying_sched(tx, rekeying, intv);
	}

	return 0;
}
   3031
   3032int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
   3033{
   3034	int err;
   3035
   3036	rtnl_lock();
   3037	err = __tipc_nl_node_set_key(skb, info);
   3038	rtnl_unlock();
   3039
   3040	return err;
   3041}
   3042
   3043static int __tipc_nl_node_flush_key(struct sk_buff *skb,
   3044				    struct genl_info *info)
   3045{
   3046	struct net *net = sock_net(skb->sk);
   3047	struct tipc_net *tn = tipc_net(net);
   3048	struct tipc_node *n;
   3049
   3050	tipc_crypto_key_flush(tn->crypto_tx);
   3051	rcu_read_lock();
   3052	list_for_each_entry_rcu(n, &tn->node_list, list)
   3053		tipc_crypto_key_flush(n->crypto_rx);
   3054	rcu_read_unlock();
   3055
   3056	return 0;
   3057}
   3058
   3059int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
   3060{
   3061	int err;
   3062
   3063	rtnl_lock();
   3064	err = __tipc_nl_node_flush_key(skb, info);
   3065	rtnl_unlock();
   3066
   3067	return err;
   3068}
   3069#endif
   3070
   3071/**
   3072 * tipc_node_dump - dump TIPC node data
   3073 * @n: tipc node to be dumped
   3074 * @more: dump more?
   3075 *        - false: dump only tipc node data
   3076 *        - true: dump node link data as well
   3077 * @buf: returned buffer of dump data in format
   3078 */
   3079int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
   3080{
   3081	int i = 0;
   3082	size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
   3083
   3084	if (!n) {
   3085		i += scnprintf(buf, sz, "node data: (null)\n");
   3086		return i;
   3087	}
   3088
   3089	i += scnprintf(buf, sz, "node data: %x", n->addr);
   3090	i += scnprintf(buf + i, sz - i, " %x", n->state);
   3091	i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
   3092	i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
   3093	i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
   3094	i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
   3095	i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
   3096	i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
   3097	i += scnprintf(buf + i, sz - i, " %u", n->working_links);
   3098	i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
   3099	i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
   3100
   3101	if (!more)
   3102		return i;
   3103
   3104	i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
   3105	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
   3106	i += scnprintf(buf + i, sz - i, " media: ");
   3107	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
   3108	i += scnprintf(buf + i, sz - i, "\n");
   3109	i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
   3110	i += scnprintf(buf + i, sz - i, " inputq: ");
   3111	i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
   3112
   3113	i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
   3114	i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
   3115	i += scnprintf(buf + i, sz - i, " media: ");
   3116	i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
   3117	i += scnprintf(buf + i, sz - i, "\n");
   3118	i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
   3119	i += scnprintf(buf + i, sz - i, " inputq: ");
   3120	i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
   3121
   3122	i += scnprintf(buf + i, sz - i, "bclink:\n ");
   3123	i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
   3124
   3125	return i;
   3126}
   3127
/* tipc_node_pre_cleanup_net - detach a dying namespace from peer nodes
 * @exit_net: the network namespace being torn down
 *
 * Walks every other live namespace and clears any node's back-pointer
 * to @exit_net so no one dereferences it after the namespace is gone.
 */
void tipc_node_pre_cleanup_net(struct net *exit_net)
{
	struct tipc_node *n;
	struct tipc_net *tn;
	struct net *tmp;

	rcu_read_lock();
	for_each_net_rcu(tmp) {
		/* The exiting namespace itself needs no fixup */
		if (tmp == exit_net)
			continue;
		tn = tipc_net(tmp);
		if (!tn)
			continue;
		spin_lock_bh(&tn->node_list_lock);
		list_for_each_entry_rcu(n, &tn->node_list, list) {
			if (!n->peer_net)
				continue;
			if (n->peer_net != exit_net)
				continue;
			/* Clear the reference under the node write lock so
			 * readers never see a stale peer_net.
			 */
			tipc_node_write_lock(n);
			n->peer_net = NULL;
			n->peer_hash_mix = 0;
			tipc_node_write_unlock_fast(n);
			/* NOTE(review): only the first matching node per
			 * namespace is cleared before breaking — presumably
			 * at most one node references a given peer_net;
			 * confirm against node creation logic.
			 */
			break;
		}
		spin_unlock_bh(&tn->node_list_lock);
	}
	rcu_read_unlock();
}