cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

monitor.c (22720B)


      1/*
      2 * net/tipc/monitor.c
      3 *
      4 * Copyright (c) 2016, Ericsson AB
      5 * All rights reserved.
      6 *
      7 * Redistribution and use in source and binary forms, with or without
      8 * modification, are permitted provided that the following conditions are met:
      9 *
     10 * 1. Redistributions of source code must retain the above copyright
     11 *    notice, this list of conditions and the following disclaimer.
     12 * 2. Redistributions in binary form must reproduce the above copyright
     13 *    notice, this list of conditions and the following disclaimer in the
     14 *    documentation and/or other materials provided with the distribution.
     15 * 3. Neither the names of the copyright holders nor the names of its
     16 *    contributors may be used to endorse or promote products derived from
     17 *    this software without specific prior written permission.
     18 *
     19 * Alternatively, this software may be distributed under the terms of the
     20 * GNU General Public License ("GPL") version 2 as published by the Free
     21 * Software Foundation.
     22 *
     23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     33 * POSSIBILITY OF SUCH DAMAGE.
     34 */
     35
     36#include <net/genetlink.h>
     37#include "core.h"
     38#include "addr.h"
     39#include "monitor.h"
     40#include "bearer.h"
     41
#define MAX_MON_DOMAIN       64     /* max members in one domain record */
#define MON_TIMEOUT          120000 /* self-check base interval, ms */
#define MAX_PEER_DOWN_EVENTS 4      /* loss reports that trigger link reset */
     45
/* struct tipc_mon_domain: domain record to be transferred between peers
 * @len: actual size of domain record
 * @gen: current generation of sender's domain
 * @ack_gen: most recent generation of self's domain acked by peer
 * @member_cnt: number of domain member nodes described in this record
 * @up_map: bit map indicating which of the members the sender considers up
 * @members: identity of the domain members
 *
 * On the wire, fields are in the byte order produced by the
 * mon_cpu_to_le*() helpers below.
 */
struct tipc_mon_domain {
	u16 len;
	u16 gen;
	u16 ack_gen;
	u16 member_cnt;
	u64 up_map;
	u32 members[MAX_MON_DOMAIN];
};
     62
/* struct tipc_peer: state of a peer node and its domain
 * @addr: tipc node identity of peer
 * @domain: most recent domain record from peer
 * @hash: position in hashed lookup list
 * @list: position in linked list, in circular ascending order by 'addr'
 * @applied: number of reported domain members applied on this monitor list
 * @down_cnt: number of other peers which have reported this peer lost
 * @is_up: peer is up as seen from this node
 * @is_head: peer is assigned domain head as seen from this node
 * @is_local: peer is in local domain and should be continuously monitored
 */
struct tipc_peer {
	u32 addr;
	struct tipc_mon_domain *domain;
	struct hlist_node hash;
	struct list_head list;
	u8 applied;
	u8 down_cnt;
	bool is_up;
	bool is_head;
	bool is_local;
};
     86
/* struct tipc_monitor: per-bearer monitor instance
 * @peers: hash table for lookup of peers by node address
 * @peer_cnt: number of peers on the monitor list, self included
 * @self: this node's own entry on the monitor list
 * @lock: protects the monitor list and all peer state
 * @cache: byte-order-converted copy of own domain record, ready to send
 * @list_gen: monitor list generation, bumped on each role reassignment
 * @dom_gen: own domain generation, bumped when the local domain changes
 * @net: namespace this monitor belongs to
 * @timer: drives the periodic self-check in mon_timeout()
 * @timer_intv: self-check interval in jiffies, includes random jitter
 */
struct tipc_monitor {
	struct hlist_head peers[NODE_HTABLE_SIZE];
	int peer_cnt;
	struct tipc_peer *self;
	rwlock_t lock;
	struct tipc_mon_domain cache;
	u16 list_gen;
	u16 dom_gen;
	struct net *net;
	struct timer_list timer;
	unsigned long timer_intv;
};
     99
/* tipc_monitor() : return the monitor instance for @bearer_id in @net;
 * NULL if none has been created yet (callers check for this).
 */
static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
{
	return tipc_net(net)->monitors[bearer_id];
}
    104
    105const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);
    106
/* NOTE(review): despite the "le" name this converts to big-endian (htons),
 * presumably the protocol's historical wire order — confirm before changing.
 */
static inline u16 mon_cpu_to_le16(u16 val)
{
	return (__force __u16)htons(val);
}
    111
/* NOTE(review): converts to big-endian (htonl) despite the "le" name;
 * keeps wire compatibility — confirm before changing.
 */
static inline u32 mon_cpu_to_le32(u32 val)
{
	return (__force __u32)htonl(val);
}
    116
/* NOTE(review): converts to big-endian (cpu_to_be64) despite the "le" name;
 * keeps wire compatibility — confirm before changing.
 */
static inline u64 mon_cpu_to_le64(u64 val)
{
	return (__force __u64)cpu_to_be64(val);
}
    121
/* Inverse of mon_cpu_to_le16(): big-endian wire value to host order */
static inline u16 mon_le16_to_cpu(u16 val)
{
	return ntohs((__force __be16)val);
}
    126
/* Inverse of mon_cpu_to_le32(): big-endian wire value to host order */
static inline u32 mon_le32_to_cpu(u32 val)
{
	return ntohl((__force __be32)val);
}
    131
/* Inverse of mon_cpu_to_le64(): big-endian wire value to host order */
static inline u64 mon_le64_to_cpu(u64 val)
{
	return be64_to_cpu((__force __be64)val);
}
    136
    137/* dom_rec_len(): actual length of domain record for transport
    138 */
    139static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
    140{
    141	return (offsetof(struct tipc_mon_domain, members)) + (mcnt * sizeof(u32));
    142}
    143
    144/* dom_size() : calculate size of own domain based on number of peers
    145 */
    146static int dom_size(int peers)
    147{
    148	int i = 0;
    149
    150	while ((i * i) < peers)
    151		i++;
    152	return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
    153}
    154
    155static void map_set(u64 *up_map, int i, unsigned int v)
    156{
    157	*up_map &= ~(1ULL << i);
    158	*up_map |= ((u64)v << i);
    159}
    160
    161static int map_get(u64 up_map, int i)
    162{
    163	return (up_map & (1 << i)) >> i;
    164}
    165
/* peer_prev() : previous entry on the circular monitor list (wraps) */
static struct tipc_peer *peer_prev(struct tipc_peer *peer)
{
	return list_last_entry(&peer->list, struct tipc_peer, list);
}
    170
/* peer_nxt() : next entry on the circular monitor list (wraps) */
static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
{
	return list_first_entry(&peer->list, struct tipc_peer, list);
}
    175
    176static struct tipc_peer *peer_head(struct tipc_peer *peer)
    177{
    178	while (!peer->is_head)
    179		peer = peer_prev(peer);
    180	return peer;
    181}
    182
/* get_peer() : look up the peer with node identity @addr in the hash table;
 * NULL if not found. All callers in this file hold mon->lock.
 */
static struct tipc_peer *get_peer(struct tipc_monitor *mon, u32 addr)
{
	struct tipc_peer *peer;
	unsigned int thash = tipc_hashfn(addr);

	hlist_for_each_entry(peer, &mon->peers[thash], hash) {
		if (peer->addr == addr)
			return peer;
	}
	return NULL;
}
    194
    195static struct tipc_peer *get_self(struct net *net, int bearer_id)
    196{
    197	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    198
    199	return mon->self;
    200}
    201
    202static inline bool tipc_mon_is_active(struct net *net, struct tipc_monitor *mon)
    203{
    204	struct tipc_net *tn = tipc_net(net);
    205
    206	return mon->peer_cnt > tn->mon_threshold;
    207}
    208
/* mon_identify_lost_members() : - identify and mark potentially lost members
 *
 * Compare the peer's previous domain record (@dom_bef/@applied_bef) with its
 * current one (peer->domain/peer->applied) and bump down_cnt for members the
 * peer no longer reports as up.
 */
static void mon_identify_lost_members(struct tipc_peer *peer,
				      struct tipc_mon_domain *dom_bef,
				      int applied_bef)
{
	struct tipc_peer *member = peer;
	struct tipc_mon_domain *dom_aft = peer->domain;
	int applied_aft = peer->applied;
	int i;

	/* Walk the members that were applied from the previous record */
	for (i = 0; i < applied_bef; i++) {
		member = peer_nxt(member);

		/* Do nothing if self or peer already see member as down */
		if (!member->is_up || !map_get(dom_bef->up_map, i))
			continue;

		/* Loss of local node must be detected by active probing */
		if (member->is_local)
			continue;

		/* Start probing if member was removed from applied domain */
		if (!applied_aft || (applied_aft < i)) {
			member->down_cnt = 1;
			continue;
		}

		/* Member loss is confirmed if it is still in applied domain */
		if (!map_get(dom_aft->up_map, i))
			member->down_cnt++;
	}
}
    242
/* mon_apply_domain() : match a peer's domain record against monitor list
 *
 * Sets peer->applied to the number of leading record entries that line up,
 * in order, with the successors of @peer on the monitor list. Records from
 * peers that are down or have no record are ignored.
 */
static void mon_apply_domain(struct tipc_monitor *mon,
			     struct tipc_peer *peer)
{
	struct tipc_mon_domain *dom = peer->domain;
	struct tipc_peer *member;
	u32 addr;
	int i;

	if (!dom || !peer->is_up)
		return;

	/* Scan across domain members and match against monitor list */
	peer->applied = 0;
	member = peer_nxt(peer);
	for (i = 0; i < dom->member_cnt; i++) {
		addr = dom->members[i];
		/* Stop at first mismatch: 'applied' covers a prefix only */
		if (addr != member->addr)
			return;
		peer->applied++;
		member = peer_nxt(member);
	}
}
    267
/* mon_update_local_domain() : update after peer addition/removal/up/down
 *
 * Rebuild this node's own domain record (and the byte-order-converted cache
 * copy used for transmission) from the current monitor list, bumping the
 * domain generation only if something actually changed.
 */
static void mon_update_local_domain(struct tipc_monitor *mon)
{
	struct tipc_peer *self = mon->self;
	struct tipc_mon_domain *cache = &mon->cache;
	struct tipc_mon_domain *dom = self->domain;
	struct tipc_peer *peer = self;
	u64 prev_up_map = dom->up_map;
	u16 member_cnt, i;
	bool diff;

	/* Update local domain size based on current size of cluster */
	member_cnt = dom_size(mon->peer_cnt) - 1;
	self->applied = member_cnt;

	/* Update native and cached outgoing local domain records */
	dom->len = dom_rec_len(dom, member_cnt);
	diff = dom->member_cnt != member_cnt;
	dom->member_cnt = member_cnt;
	for (i = 0; i < member_cnt; i++) {
		peer = peer_nxt(peer);
		diff |= dom->members[i] != peer->addr;
		dom->members[i] = peer->addr;
		map_set(&dom->up_map, i, peer->is_up);
		cache->members[i] = mon_cpu_to_le32(peer->addr);
	}
	diff |= dom->up_map != prev_up_map;
	if (!diff)
		return;
	/* Something changed: advance generation, refresh cache header */
	dom->gen = ++mon->dom_gen;
	cache->len = mon_cpu_to_le16(dom->len);
	cache->gen = mon_cpu_to_le16(dom->gen);
	cache->member_cnt = mon_cpu_to_le16(member_cnt);
	cache->up_map = mon_cpu_to_le64(dom->up_map);
	mon_apply_domain(mon, self);
}
    305
    306/* mon_update_neighbors() : update preceding neighbors of added/removed peer
    307 */
    308static void mon_update_neighbors(struct tipc_monitor *mon,
    309				 struct tipc_peer *peer)
    310{
    311	int dz, i;
    312
    313	dz = dom_size(mon->peer_cnt);
    314	for (i = 0; i < dz; i++) {
    315		mon_apply_domain(mon, peer);
    316		peer = peer_prev(peer);
    317	}
    318}
    319
/* mon_assign_roles() : reassign peer roles after a network change
 * The monitor list is consistent at this stage; i.e., each peer is monitoring
 * a set of domain members as matched between domain record and the monitor list
 *
 * Starting at @head, walk the circular list up to self: peers covered by the
 * current head's applied domain are plain members (marked local when we are
 * that head); the first 'up' peer beyond the coverage becomes the next head.
 */
static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
{
	struct tipc_peer *peer = peer_nxt(head);
	struct tipc_peer *self = mon->self;
	int i = 0;

	for (; peer != self; peer = peer_nxt(peer)) {
		peer->is_local = false;

		/* Update domain member */
		if (i++ < head->applied) {
			peer->is_head = false;
			if (head == self)
				peer->is_local = true;
			continue;
		}
		/* Assign next domain head */
		if (!peer->is_up)
			continue;
		/* An already-assigned head means the rest is unchanged */
		if (peer->is_head)
			break;
		head = peer;
		head->is_head = true;
		i = 0;
	}
	/* Monitor list layout changed: bump list generation */
	mon->list_gen++;
}
    351
/* tipc_mon_remove_peer() : drop @addr from the monitor list of @bearer_id
 * and rebuild domain records and roles around the removal point.
 */
void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self;
	struct tipc_peer *peer, *prev, *head;

	if (!mon)
		return;

	self = get_self(net, bearer_id);
	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer)
		goto exit;
	prev = peer_prev(peer);
	list_del(&peer->list);
	hlist_del(&peer->hash);
	kfree(peer->domain);
	kfree(peer);
	mon->peer_cnt--;
	/* The removal may have changed our own domain contents */
	head = peer_head(prev);
	if (head == self)
		mon_update_local_domain(mon);
	mon_update_neighbors(mon, prev);

	/* Revert to full-mesh monitoring if we reach threshold */
	if (!tipc_mon_is_active(net, mon)) {
		list_for_each_entry(peer, &self->list, list) {
			kfree(peer->domain);
			peer->domain = NULL;
			peer->applied = 0;
		}
	}
	mon_assign_roles(mon, head);
exit:
	write_unlock_bh(&mon->lock);
}
    389
/* tipc_mon_add_peer() : allocate a peer entry for @addr and insert it into
 * the hash table and the sorted circular monitor list.
 * Returns false (with *peer == NULL) on allocation failure.
 */
static bool tipc_mon_add_peer(struct tipc_monitor *mon, u32 addr,
			      struct tipc_peer **peer)
{
	struct tipc_peer *self = mon->self;
	struct tipc_peer *cur, *prev, *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	*peer = p;
	if (!p)
		return false;
	p->addr = addr;

	/* Add new peer to lookup list */
	INIT_LIST_HEAD(&p->list);
	hlist_add_head(&p->hash, &mon->peers[tipc_hashfn(addr)]);

	/* Sort new peer into iterator list, in ascending circular order */
	prev = self;
	list_for_each_entry(cur, &self->list, list) {
		if ((addr > prev->addr) && (addr < cur->addr))
			break;
		/* Also stop at the list's wrap-around point */
		if (((addr < cur->addr) || (addr > prev->addr)) &&
		    (prev->addr > cur->addr))
			break;
		prev = cur;
	}
	list_add_tail(&p->list, &cur->list);
	mon->peer_cnt++;
	mon_update_neighbors(mon, p);
	return true;
}
    421
    422void tipc_mon_peer_up(struct net *net, u32 addr, int bearer_id)
    423{
    424	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
    425	struct tipc_peer *self = get_self(net, bearer_id);
    426	struct tipc_peer *peer, *head;
    427
    428	write_lock_bh(&mon->lock);
    429	peer = get_peer(mon, addr);
    430	if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
    431		goto exit;
    432	peer->is_up = true;
    433	head = peer_head(peer);
    434	if (head == self)
    435		mon_update_local_domain(mon);
    436	mon_assign_roles(mon, head);
    437exit:
    438	write_unlock_bh(&mon->lock);
    439}
    440
/* tipc_mon_peer_down() : handle loss of the link to @addr; clear the peer's
 * domain record and roles, then rebuild domains and roles around it.
 */
void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self;
	struct tipc_peer *peer, *head;
	struct tipc_mon_domain *dom;
	int applied;

	if (!mon)
		return;

	self = get_self(net, bearer_id);
	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer) {
		pr_warn("Mon: unknown link %x/%u DOWN\n", addr, bearer_id);
		goto exit;
	}
	applied = peer->applied;
	peer->applied = 0;
	dom = peer->domain;
	peer->domain = NULL;
	/* A lost domain head takes its members' supervision with it */
	if (peer->is_head)
		mon_identify_lost_members(peer, dom, applied);
	kfree(dom);
	peer->is_up = false;
	peer->is_head = false;
	peer->is_local = false;
	peer->down_cnt = 0;
	head = peer_head(peer);
	if (head == self)
		mon_update_local_domain(mon);
	mon_assign_roles(mon, head);
exit:
	write_unlock_bh(&mon->lock);
}
    477
/* tipc_mon_rcv - process monitor domain event message
 * @net: namespace the message arrived in
 * @data: received domain record, still in wire byte order
 * @dlen: length of @data as reported by the caller
 * @addr: node identity of the sending peer
 * @state: per-link monitor state kept for this peer
 * @bearer_id: bearer the message arrived on
 *
 * Validates the received record, synchronizes generation numbers with the
 * peer, stores the converted record and updates the local view of the
 * peers it covers.
 */
void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
		  struct tipc_mon_state *state, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_mon_domain *arrv_dom = data;
	struct tipc_mon_domain dom_bef;
	struct tipc_mon_domain *dom;
	struct tipc_peer *peer;
	u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt);
	int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
	u16 new_gen = mon_le16_to_cpu(arrv_dom->gen);
	u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen);
	u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len);
	bool probing = state->probing;
	int i, applied_bef;

	state->probing = false;

	/* Sanity check received domain record */
	if (new_member_cnt > MAX_MON_DOMAIN)
		return;
	if (dlen < dom_rec_len(arrv_dom, 0))
		return;
	if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
		return;
	if (dlen < new_dlen || arrv_dlen != new_dlen)
		return;

	/* Synch generation numbers with peer if link just came up */
	if (!state->synched) {
		state->peer_gen = new_gen - 1;
		state->acked_gen = acked_gen;
		state->synched = true;
	}

	if (more(acked_gen, state->acked_gen))
		state->acked_gen = acked_gen;

	/* Drop duplicate unless we are waiting for a probe response */
	if (!more(new_gen, state->peer_gen) && !probing)
		return;

	write_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (!peer || !peer->is_up)
		goto exit;

	/* Peer is confirmed, stop any ongoing probing */
	peer->down_cnt = 0;

	/* Task is done for duplicate record */
	if (!more(new_gen, state->peer_gen))
		goto exit;

	state->peer_gen = new_gen;

	/* Cache current domain record for later use */
	dom_bef.member_cnt = 0;
	dom = peer->domain;
	if (dom)
		memcpy(&dom_bef, dom, dom->len);

	/* Transform and store received domain record */
	if (!dom || (dom->len < new_dlen)) {
		kfree(dom);
		dom = kmalloc(new_dlen, GFP_ATOMIC);
		peer->domain = dom;
		if (!dom)
			goto exit;
	}
	dom->len = new_dlen;
	dom->gen = new_gen;
	dom->member_cnt = new_member_cnt;
	dom->up_map = mon_le64_to_cpu(arrv_dom->up_map);
	for (i = 0; i < new_member_cnt; i++)
		dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]);

	/* Update peers affected by this domain record */
	applied_bef = peer->applied;
	mon_apply_domain(mon, peer);
	mon_identify_lost_members(peer, &dom_bef, applied_bef);
	mon_assign_roles(mon, peer_head(peer));
exit:
	write_unlock_bh(&mon->lock);
}
    565
/* tipc_mon_prep() : fill @data with the domain record to piggy-back on an
 * outgoing state message, reporting its length in *dlen.
 */
void tipc_mon_prep(struct net *net, void *data, int *dlen,
		   struct tipc_mon_state *state, int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_mon_domain *dom = data;
	u16 gen = mon->dom_gen;
	u16 len;

	/* Send invalid record if not active */
	if (!tipc_mon_is_active(net, mon)) {
		dom->len = 0;
		return;
	}

	/* Send only a dummy record with ack if peer has acked our last sent */
	if (likely(state->acked_gen == gen)) {
		len = dom_rec_len(dom, 0);
		*dlen = len;
		dom->len = mon_cpu_to_le16(len);
		dom->gen = mon_cpu_to_le16(gen);
		dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
		dom->member_cnt = 0;
		return;
	}
	/* Send the full record */
	read_lock_bh(&mon->lock);
	len = mon_le16_to_cpu(mon->cache.len);
	*dlen = len;
	memcpy(data, &mon->cache, len);
	read_unlock_bh(&mon->lock);
	dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
}
    598
/* tipc_mon_get_state() : refresh the cached link supervision state for the
 * link to @addr, deciding whether probing, monitoring or a reset is needed.
 */
void tipc_mon_get_state(struct net *net, u32 addr,
			struct tipc_mon_state *state,
			int bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *peer;

	/* Inactive monitor: every link is actively monitored, never probed */
	if (!tipc_mon_is_active(net, mon)) {
		state->probing = false;
		state->monitoring = true;
		return;
	}

	/* Used cached state if table has not changed */
	if (!state->probing &&
	    (state->list_gen == mon->list_gen) &&
	    (state->acked_gen == mon->dom_gen))
		return;

	read_lock_bh(&mon->lock);
	peer = get_peer(mon, addr);
	if (peer) {
		state->probing = state->acked_gen != mon->dom_gen;
		state->probing |= peer->down_cnt;
		/* Enough peer loss reports force a link reset */
		state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
		state->monitoring = peer->is_local;
		state->monitoring |= peer->is_head;
		state->list_gen = mon->list_gen;
	}
	read_unlock_bh(&mon->lock);
}
    630
/* mon_timeout() : periodic self-check; re-sync the local domain record with
 * the monitor list if the expected member count has drifted, then re-arm.
 */
static void mon_timeout(struct timer_list *t)
{
	struct tipc_monitor *mon = from_timer(mon, t, timer);
	struct tipc_peer *self;
	int best_member_cnt = dom_size(mon->peer_cnt) - 1;

	write_lock_bh(&mon->lock);
	self = mon->self;
	if (self && (best_member_cnt != self->applied)) {
		mon_update_local_domain(mon);
		mon_assign_roles(mon, self);
	}
	write_unlock_bh(&mon->lock);
	mod_timer(&mon->timer, jiffies + mon->timer_intv);
}
    646
/* tipc_mon_create() : create and start the monitor instance for @bearer_id.
 * Returns 0 on success (or if it already exists), -ENOMEM otherwise.
 */
int tipc_mon_create(struct net *net, int bearer_id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_monitor *mon;
	struct tipc_peer *self;
	struct tipc_mon_domain *dom;

	if (tn->monitors[bearer_id])
		return 0;

	mon = kzalloc(sizeof(*mon), GFP_ATOMIC);
	self = kzalloc(sizeof(*self), GFP_ATOMIC);
	dom = kzalloc(sizeof(*dom), GFP_ATOMIC);
	if (!mon || !self || !dom) {
		kfree(mon);
		kfree(self);
		kfree(dom);
		return -ENOMEM;
	}
	tn->monitors[bearer_id] = mon;
	rwlock_init(&mon->lock);
	mon->net = net;
	mon->peer_cnt = 1;
	mon->self = self;
	self->domain = dom;
	self->addr = tipc_own_addr(net);
	self->is_up = true;
	self->is_head = true;
	INIT_LIST_HEAD(&self->list);
	timer_setup(&mon->timer, mon_timeout, 0);
	/* Random jitter is added to the self-check interval */
	mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
	mod_timer(&mon->timer, jiffies + mon->timer_intv);
	return 0;
}
    681
/* tipc_mon_delete() : stop and free the monitor instance for @bearer_id,
 * including all peer entries and their domain records.
 */
void tipc_mon_delete(struct net *net, int bearer_id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *self;
	struct tipc_peer *peer, *tmp;

	if (!mon)
		return;

	self = get_self(net, bearer_id);
	write_lock_bh(&mon->lock);
	tn->monitors[bearer_id] = NULL;
	list_for_each_entry_safe(peer, tmp, &self->list, list) {
		list_del(&peer->list);
		hlist_del(&peer->hash);
		kfree(peer->domain);
		kfree(peer);
	}
	mon->self = NULL;
	write_unlock_bh(&mon->lock);
	/* Wait out a possibly running mon_timeout() before freeing */
	del_timer_sync(&mon->timer);
	kfree(self->domain);
	kfree(self);
	kfree(mon);
}
    708
    709void tipc_mon_reinit_self(struct net *net)
    710{
    711	struct tipc_monitor *mon;
    712	int bearer_id;
    713
    714	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
    715		mon = tipc_monitor(net, bearer_id);
    716		if (!mon)
    717			continue;
    718		write_lock_bh(&mon->lock);
    719		mon->self->addr = tipc_own_addr(net);
    720		write_unlock_bh(&mon->lock);
    721	}
    722}
    723
    724int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
    725{
    726	struct tipc_net *tn = tipc_net(net);
    727
    728	if (cluster_size > TIPC_CLUSTER_SIZE)
    729		return -EINVAL;
    730
    731	tn->mon_threshold = cluster_size;
    732
    733	return 0;
    734}
    735
    736int tipc_nl_monitor_get_threshold(struct net *net)
    737{
    738	struct tipc_net *tn = tipc_net(net);
    739
    740	return tn->mon_threshold;
    741}
    742
/* __tipc_nl_add_monitor_peer() : append one TIPC_NLA_MON_PEER attribute set
 * describing @peer to netlink message @msg.
 * Returns 0 on success, -EMSGSIZE if the message buffer is full.
 */
static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
				      struct tipc_nl_msg *msg)
{
	struct tipc_mon_domain *dom = peer->domain;
	struct nlattr *attrs;
	void *hdr;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_MON_PEER_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON_PEER);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
		goto attr_msg_full;

	if (peer->is_up)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_UP))
			goto attr_msg_full;
	if (peer->is_local)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_LOCAL))
			goto attr_msg_full;
	if (peer->is_head)
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_PEER_HEAD))
			goto attr_msg_full;

	/* Domain details exist only if we hold a record from this peer */
	if (dom) {
		if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_DOMGEN, dom->gen))
			goto attr_msg_full;
		if (nla_put_u64_64bit(msg->skb, TIPC_NLA_MON_PEER_UPMAP,
				      dom->up_map, TIPC_NLA_MON_PEER_PAD))
			goto attr_msg_full;
		if (nla_put(msg->skb, TIPC_NLA_MON_PEER_MEMBERS,
			    dom->member_cnt * sizeof(u32), &dom->members))
			goto attr_msg_full;
	}

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);
	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
    796
/* tipc_nl_add_monitor_peer() : dump all peers on the monitor list of
 * @bearer_id into @msg, resuming after *prev_node when a previous dump was
 * interrupted. On -EMSGSIZE, *prev_node records the resume point.
 */
int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
			     u32 bearer_id, u32 *prev_node)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	struct tipc_peer *peer;

	if (!mon)
		return -EINVAL;

	read_lock_bh(&mon->lock);
	peer = mon->self;
	do {
		/* Skip entries until just past the previous dump's last node */
		if (*prev_node) {
			if (peer->addr == *prev_node)
				*prev_node = 0;
			else
				continue;
		}
		if (__tipc_nl_add_monitor_peer(peer, msg)) {
			*prev_node = peer->addr;
			read_unlock_bh(&mon->lock);
			return -EMSGSIZE;
		}
	} while ((peer = peer_nxt(peer)) != mon->self);
	read_unlock_bh(&mon->lock);

	return 0;
}
    825
/* __tipc_nl_add_monitor() : append one TIPC_NLA_MON attribute set describing
 * the monitor of @bearer_id to @msg.
 * Returns 0 on success (also when the bearer has no name or no monitor),
 * -EMSGSIZE if the message buffer is full.
 */
int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
			  u32 bearer_id)
{
	struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
	char bearer_name[TIPC_MAX_BEARER_NAME];
	struct nlattr *attrs;
	void *hdr;
	int ret;

	ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
	if (ret || !mon)
		return 0;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_MON_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
	if (!attrs)
		goto msg_full;

	read_lock_bh(&mon->lock);
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_REF, bearer_id))
		goto attr_msg_full;
	if (tipc_mon_is_active(net, mon))
		if (nla_put_flag(msg->skb, TIPC_NLA_MON_ACTIVE))
			goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_MON_BEARER_NAME, bearer_name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEERCNT, mon->peer_cnt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_MON_LISTGEN, mon->list_gen))
		goto attr_msg_full;

	read_unlock_bh(&mon->lock);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	read_unlock_bh(&mon->lock);
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}