cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hsr_framereg.c (18473B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * The HSR spec says never to forward the same frame twice on the same
 * interface. A frame is identified by its source MAC address and its HSR
 * sequence number. This code keeps track of senders and their sequence numbers
 * to allow filtering of duplicate frames, and to detect HSR ring errors.
 * The same code handles filtering of duplicates for PRP as well.
 */
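/* Illustrative sketch (not part of the original file): duplicate detection
 * conceptually keys on the pair (source MAC, HSR sequence number). The idea
 * is roughly:
 *
 *	// A frame is a duplicate if this sender has already had a frame with
 *	// the same (or a later) sequence number registered for this port.
 *	bool is_duplicate = seq_nr_before_or_eq(new_seq_nr,
 *						node->seq_out[port->type]);
 *
 * The real check lives in hsr_register_frame_out() further down, which
 * additionally applies a time limit (HSR_ENTRY_FORGET_TIME).
 */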

#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include "hsr_main.h"
#include "hsr_framereg.h"
#include "hsr_netlink.h"

#ifdef CONFIG_LOCKDEP
int lockdep_hsr_is_held(spinlock_t *lock)
{
	return lockdep_is_held(lock);
}
#endif

u32 hsr_mac_hash(struct hsr_priv *hsr, const unsigned char *addr)
{
	u32 hash = jhash(addr, ETH_ALEN, hsr->hash_seed);

	return reciprocal_scale(hash, hsr->hash_buckets);
}
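/* Usage sketch, mirroring the callers further down in this file (e.g.
 * hsr_get_node_data()): the hash selects a bucket of the node_db table,
 * which is then searched for the MAC address itself:
 *
 *	u32 hash = hsr_mac_hash(hsr, addr);
 *	struct hsr_node *node;
 *
 *	node = find_node_by_addr_A(&hsr->node_db[hash], addr);
 *
 * hsr->hash_seed perturbs the jhash value, and reciprocal_scale() maps the
 * 32-bit hash into the range [0, hsr->hash_buckets).
 */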

struct hsr_node *hsr_node_get_first(struct hlist_head *head, spinlock_t *lock)
{
	struct hlist_node *first;

	first = rcu_dereference_bh_check(hlist_first_rcu(head),
					 lockdep_hsr_is_held(lock));
	if (first)
		return hlist_entry(first, struct hsr_node, mac_list);

	return NULL;
}

/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
 * false otherwise.
 */
static bool seq_nr_after(u16 a, u16 b)
{
	/* Remove inconsistency where
	 * seq_nr_after(a, b) == seq_nr_before(a, b)
	 */
	if ((int)b - a == 32768)
		return false;

	return (((s16)(b - a)) < 0);
}

#define seq_nr_before(a, b)		seq_nr_after((b), (a))
#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))
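/* Worked example (illustration only): the comparison works modulo 2^16, so a
 * small sequence number can still be "after" a large one:
 *
 *	seq_nr_after(1, 65535)   -> true   ((s16)(65535 - 1) == -2 < 0)
 *	seq_nr_after(65535, 1)   -> false
 *	seq_nr_after(32768, 0)   -> true
 *	seq_nr_after(0, 32768)   -> false  (the explicit 32768 check breaks
 *					    the tie; without it both directions
 *					    would report "after")
 */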

bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
	struct hsr_node *node;

	node = hsr_node_get_first(&hsr->self_node_db, &hsr->list_lock);
	if (!node) {
		WARN_ONCE(1, "HSR: No self node\n");
		return false;
	}

	if (ether_addr_equal(addr, node->macaddress_A))
		return true;
	if (ether_addr_equal(addr, node->macaddress_B))
		return true;

	return false;
}
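/* Usage sketch (assumed, based on the comment above hsr_create_self_node()):
 * the receive path uses this to recognize and drop frames that this host sent
 * itself and that have travelled all the way around the ring, roughly:
 *
 *	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_source)) {
 *		// Our own frame came back around the ring - discard it.
 *		kfree_skb(skb);
 *		return;
 *	}
 */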

/* Search for mac entry. Caller must hold rcu read lock.
 */
static struct hsr_node *find_node_by_addr_A(struct hlist_head *node_db,
					    const unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	hlist_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, addr))
			return node;
	}

	return NULL;
}
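/* Caller-side sketch (illustration only): since the list is only protected by
 * RCU here, a lookup from a context that does not already hold the RCU read
 * lock would look roughly like:
 *
 *	rcu_read_lock();
 *	node = find_node_by_addr_A(&hsr->node_db[hash], addr);
 *	if (node) {
 *		// Use 'node' only inside the read-side critical section,
 *		// or copy out whatever is needed.
 *	}
 *	rcu_read_unlock();
 */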

/* Helper for device init; the self_node_db is used in hsr_rcv() to recognize
 * frames from self that have been looped over the HSR ring.
 */
int hsr_create_self_node(struct hsr_priv *hsr,
			 const unsigned char addr_a[ETH_ALEN],
			 const unsigned char addr_b[ETH_ALEN])
{
	struct hlist_head *self_node_db = &hsr->self_node_db;
	struct hsr_node *node, *oldnode;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ether_addr_copy(node->macaddress_A, addr_a);
	ether_addr_copy(node->macaddress_B, addr_b);

	spin_lock_bh(&hsr->list_lock);
	oldnode = hsr_node_get_first(self_node_db, &hsr->list_lock);
	if (oldnode) {
		hlist_replace_rcu(&oldnode->mac_list, &node->mac_list);
		spin_unlock_bh(&hsr->list_lock);
		kfree_rcu(oldnode, rcu_head);
	} else {
		hlist_add_tail_rcu(&node->mac_list, self_node_db);
		spin_unlock_bh(&hsr->list_lock);
	}

	return 0;
}
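/* Note on the update pattern above: the new self node is published with
 * hlist_replace_rcu() while readers may be walking the list under RCU, and
 * the old entry is only freed after a grace period via kfree_rcu(). A MAC
 * address change would therefore be driven roughly like this (illustrative
 * sketch, assuming a NETDEV_CHANGEADDR-style handler in the device code):
 *
 *	res = hsr_create_self_node(hsr, new_addr_a, new_addr_b);
 *	if (res < 0)
 *		return res;
 */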

void hsr_del_self_node(struct hsr_priv *hsr)
{
	struct hlist_head *self_node_db = &hsr->self_node_db;
	struct hsr_node *node;

	spin_lock_bh(&hsr->list_lock);
	node = hsr_node_get_first(self_node_db, &hsr->list_lock);
	if (node) {
		hlist_del_rcu(&node->mac_list);
		kfree_rcu(node, rcu_head);
	}
	spin_unlock_bh(&hsr->list_lock);
}

void hsr_del_nodes(struct hlist_head *node_db)
{
	struct hsr_node *node;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(node, tmp, node_db, mac_list)
		kfree_rcu(node, rcu_head);
}

void prp_handle_san_frame(bool san, enum hsr_port_type port,
			  struct hsr_node *node)
{
	/* Mark if the SAN node is over LAN_A or LAN_B */
	if (port == HSR_PT_SLAVE_A) {
		node->san_a = true;
		return;
	}

	if (port == HSR_PT_SLAVE_B)
		node->san_b = true;
}

/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
 * seq_out is used to initialize filtering of outgoing duplicate frames
 * originating from the newly added node.
 */
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
				     struct hlist_head *node_db,
				     unsigned char addr[],
				     u16 seq_out, bool san,
				     enum hsr_port_type rx_port)
{
	struct hsr_node *new_node, *node;
	unsigned long now;
	int i;

	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
	if (!new_node)
		return NULL;

	ether_addr_copy(new_node->macaddress_A, addr);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger a spurious ring error warning).
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++) {
		new_node->time_in[i] = now;
		new_node->time_out[i] = now;
	}
	for (i = 0; i < HSR_PT_PORTS; i++)
		new_node->seq_out[i] = seq_out;

	if (san && hsr->proto_ops->handle_san_frame)
		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);

	spin_lock_bh(&hsr->list_lock);
	hlist_for_each_entry_rcu(node, node_db, mac_list,
				 lockdep_hsr_is_held(&hsr->list_lock)) {
		if (ether_addr_equal(node->macaddress_A, addr))
			goto out;
		if (ether_addr_equal(node->macaddress_B, addr))
			goto out;
	}
	hlist_add_tail_rcu(&new_node->mac_list, node_db);
	spin_unlock_bh(&hsr->list_lock);
	return new_node;
out:
	spin_unlock_bh(&hsr->list_lock);
	kfree(new_node);
	return node;
}
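/* Descriptive note: hsr_add_node() uses an optimistic allocation pattern. The
 * new entry is allocated and filled in before taking list_lock; under the
 * lock the bucket is re-checked, and if another context added the same
 * address in the meantime the fresh allocation is simply freed and the
 * existing node returned. This keeps the locked section short at the cost of
 * an occasional wasted kzalloc(). The pattern in isolation (lookup()/insert()
 * are placeholders, not real helpers):
 *
 *	new = kzalloc(sizeof(*new), GFP_ATOMIC);
 *	spin_lock_bh(&lock);
 *	existing = lookup(db, key);
 *	if (!existing)
 *		insert(db, new);
 *	spin_unlock_bh(&lock);
 *	if (existing)
 *		kfree(new);
 */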

void prp_update_san_info(struct hsr_node *node, bool is_sup)
{
	if (!is_sup)
		return;

	node->san_a = false;
	node->san_b = false;
}

/* Get the hsr_node from which 'skb' was sent.
 */
struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db,
			      struct sk_buff *skb, bool is_sup,
			      enum hsr_port_type rx_port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	struct prp_rct *rct;
	bool san = false;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	hlist_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
	}

	/* Any node connected to an HSR/PRP device may get a node entry
	 * created for it here.
	 */
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
	} else {
		rct = skb_get_PRP_rct(skb);
		if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
			seq_out = prp_get_skb_sequence_nr(rct);
		} else {
			if (rx_port != HSR_PT_MASTER)
				san = true;
			seq_out = HSR_SEQNR_START;
		}
	}

	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
			    san, rx_port);
}
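/* Worked example (illustration only): for an HSR/PRP-tagged frame carrying
 * sequence number N, seq_out is initialized to N - 1. hsr_register_frame_out()
 * below uses seq_nr_before_or_eq(), so the very frame that created the entry
 * is still forwarded, while the duplicate copy arriving via the other ring
 * port is caught when it would be sent out of the same port again (within
 * HSR_ENTRY_FORGET_TIME):
 *
 *	seq_out = N - 1;
 *	seq_nr_before_or_eq(N, seq_out)  -> false  // first copy: forward
 *	// seq_out is then updated to N
 *	seq_nr_before_or_eq(N, seq_out)  -> true   // second copy: duplicate
 */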

/* Use the Supervision frame's info about a possible macaddress_B for merging
 * nodes that have previously had their macaddress_B registered as a separate
 * node.
 */
void hsr_handle_sup_frame(struct hsr_frame_info *frame)
{
	struct hsr_node *node_curr = frame->node_src;
	struct hsr_port *port_rcv = frame->port_rcv;
	struct hsr_priv *hsr = port_rcv->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tlv *hsr_sup_tlv;
	struct hsr_node *node_real;
	struct sk_buff *skb = NULL;
	struct hlist_head *node_db;
	struct ethhdr *ethhdr;
	int i;
	unsigned int pull_size = 0;
	unsigned int total_pull_size = 0;
	u32 hash;

	/* Here either frame->skb_hsr or frame->skb_prp should be valid, as a
	 * supervision frame will always have protocol header info.
	 */
	if (frame->skb_hsr)
		skb = frame->skb_hsr;
	else if (frame->skb_prp)
		skb = frame->skb_prp;
	else if (frame->skb_std)
		skb = frame->skb_std;
	if (!skb)
		return;

	/* Leave the ethernet header. */
	pull_size = sizeof(struct ethhdr);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* And leave the HSR tag. */
	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
		pull_size = sizeof(struct ethhdr);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;
	}

	/* And leave the HSR sup tag. */
	pull_size = sizeof(struct hsr_tag);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* get HSR sup payload */
	hsr_sp = (struct hsr_sup_payload *)skb->data;

	/* Merge node_curr (registered on macaddress_B) into node_real */
	node_db = port_rcv->hsr->node_db;
	hash = hsr_mac_hash(hsr, hsr_sp->macaddress_A);
	node_real = find_node_by_addr_A(&node_db[hash], hsr_sp->macaddress_A);
	if (!node_real)
		/* No frame received from AddrA of this node yet */
		node_real = hsr_add_node(hsr, &node_db[hash],
					 hsr_sp->macaddress_A,
					 HSR_SEQNR_START - 1, true,
					 port_rcv->type);
	if (!node_real)
		goto done; /* No mem */
	if (node_real == node_curr)
		/* Node has already been merged */
		goto done;

	/* Leave the first HSR sup payload. */
	pull_size = sizeof(struct hsr_sup_payload);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* Get second supervision tlv */
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	/* And check if it is a redbox mac TLV */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* We could stop here after pushing hsr_sup_payload,
		 * or proceed and allow macaddress_B for redboxes as well.
		 */
		/* Sanity check length */
		if (hsr_sup_tlv->HSR_TLV_length != 6)
			goto done;

		/* Leave the second HSR sup tlv. */
		pull_size = sizeof(struct hsr_sup_tlv);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;

		/* Get redbox mac address. */
		hsr_sp = (struct hsr_sup_payload *)skb->data;

		/* Check if redbox mac and node mac are equal. */
		if (!ether_addr_equal(node_real->macaddress_A,
				      hsr_sp->macaddress_A)) {
			/* This is a redbox supervision frame for a VDAN! */
			goto done;
		}
	}

	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
	for (i = 0; i < HSR_PT_PORTS; i++) {
		if (!node_curr->time_in_stale[i] &&
		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
			node_real->time_in[i] = node_curr->time_in[i];
			node_real->time_in_stale[i] =
						node_curr->time_in_stale[i];
		}
		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
			node_real->seq_out[i] = node_curr->seq_out[i];
	}
	node_real->addr_B_port = port_rcv->type;

	spin_lock_bh(&hsr->list_lock);
	hlist_del_rcu(&node_curr->mac_list);
	spin_unlock_bh(&hsr->list_lock);
	kfree_rcu(node_curr, rcu_head);

done:
	/* Push back here */
	skb_push(skb, total_pull_size);
}
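/* Bookkeeping note: every skb_pull() above is accumulated in total_pull_size,
 * and the single skb_push() at 'done:' undoes all of them, so the skb leaves
 * this function with skb->data pointing at the Ethernet header again, exactly
 * as it arrived. Reduced sketch of the pattern:
 *
 *	total = 0;
 *	skb_pull(skb, a); total += a;
 *	skb_pull(skb, b); total += b;
 *	...
 *	skb_push(skb, total);	// restore the original data pointer
 */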

/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
 *
 * If the frame was sent by a node's B interface, replace the source
 * address with that node's "official" address (macaddress_A) so that upper
 * layers recognize where it came from.
 */
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
{
	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
}

/* 'skb' is a frame meant for another host.
 * 'port' is the outgoing interface
 *
 * Substitute the target (dest) MAC address if necessary, so that it matches
 * the recipient interface MAC address, regardless of whether that is the
 * recipient's A or B interface.
 * This is needed to keep the packets flowing through switches that learn on
 * which "side" the different interfaces are.
 */
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
			 struct hsr_port *port)
{
	struct hsr_node *node_dst;
	u32 hash;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
		return;

	hash = hsr_mac_hash(port->hsr, eth_hdr(skb)->h_dest);
	node_dst = find_node_by_addr_A(&port->hsr->node_db[hash],
				       eth_hdr(skb)->h_dest);
	if (!node_dst) {
		if (net_ratelimit())
			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
		return;
	}
	if (port->type != node_dst->addr_B_port)
		return;

	if (is_valid_ether_addr(node_dst->macaddress_B))
		ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}
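/* Example scenario (illustration only): suppose the destination node is known
 * by its macaddress_A, but its B interface is attached to the LAN reached
 * through 'port' (node_dst->addr_B_port == port->type). The frame then leaves
 * with h_dest rewritten to macaddress_B, so the learning switches on that LAN
 * see a destination they actually have a forwarding entry for:
 *
 *	h_dest on entry:  macaddress_A of the destination node
 *	h_dest on exit:   macaddress_B (only on the port where B is reachable)
 *
 * On any other outgoing port the address is left untouched.
 */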

void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
			   u16 sequence_nr)
{
	/* Don't register incoming frames without a valid sequence number. This
	 * ensures that entries of restarted nodes get pruned so that they can
	 * re-register and resume communications.
	 */
	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
		return;

	node->time_in[port->type] = jiffies;
	node->time_in_stale[port->type] = false;
}

/* 'skb' is an HSR Ethernet frame (with an HSR tag inserted), with a valid
 * ethhdr->h_source address and skb->mac_header set.
 *
 * Return:
 *	 1 if frame can be shown to have been sent recently on this interface,
 *	 0 otherwise, or
 *	 negative error code on error
 */
int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
			   u16 sequence_nr)
{
	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
	    time_is_after_jiffies(node->time_out[port->type] +
	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)))
		return 1;

	node->time_out[port->type] = jiffies;
	node->seq_out[port->type] = sequence_nr;
	return 0;
}
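/* Usage sketch (assumption about the caller, which lives in hsr_forward.c):
 * the forwarding path is expected to consult this per outgoing port and only
 * transmit when the frame is not a known duplicate, roughly:
 *
 *	if (hsr_register_frame_out(port, node_src, sequence_nr))
 *		continue;	// duplicate - already sent on this port
 *	// ...otherwise clone/transmit the frame on 'port'
 *
 * A return value of 1 means "seen recently on this port" (same or earlier
 * sequence number within HSR_ENTRY_FORGET_TIME); 0 updates the record and
 * lets the frame through.
 */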

static struct hsr_port *get_late_port(struct hsr_priv *hsr,
				      struct hsr_node *node)
{
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	if (time_after(node->time_in[HSR_PT_SLAVE_B],
		       node->time_in[HSR_PT_SLAVE_A] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (time_after(node->time_in[HSR_PT_SLAVE_A],
		       node->time_in[HSR_PT_SLAVE_B] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	return NULL;
}
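/* Descriptive note: get_late_port() answers "on which slave port has this
 * node fallen silent?". A port whose time_in is stale is reported directly;
 * otherwise, if the node was last heard on one slave more than MAX_SLAVE_DIFF
 * milliseconds after the other, the quieter port is returned. For example:
 *
 *	time_in[HSR_PT_SLAVE_A] = t
 *	time_in[HSR_PT_SLAVE_B] > t + msecs_to_jiffies(MAX_SLAVE_DIFF)
 *		-> returns the HSR_PT_SLAVE_A port (the "late" side)
 *
 * NULL means both ring ports are still delivering frames within the allowed
 * skew, i.e. no ring error is indicated for this node.
 */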

/* Remove stale sequence_nr records. Called by timer every
 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
 */
void hsr_prune_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
	struct hlist_node *tmp;
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long timestamp;
	unsigned long time_a, time_b;
	int i;

	spin_lock_bh(&hsr->list_lock);

	for (i = 0; i < hsr->hash_buckets; i++) {
		hlist_for_each_entry_safe(node, tmp, &hsr->node_db[i],
					  mac_list) {
			/* Don't prune our own node. Neither
			 * time_in[HSR_PT_SLAVE_A] nor time_in[HSR_PT_SLAVE_B]
			 * will ever be updated for the master port, so without
			 * this check the master node would be repeatedly
			 * pruned, leading to packet loss.
			 */
			if (hsr_addr_is_self(hsr, node->macaddress_A))
				continue;

			/* Shorthand */
			time_a = node->time_in[HSR_PT_SLAVE_A];
			time_b = node->time_in[HSR_PT_SLAVE_B];

			/* Check for timestamps old enough to
			 * risk wrap-around
			 */
			if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
				node->time_in_stale[HSR_PT_SLAVE_A] = true;
			if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
				node->time_in_stale[HSR_PT_SLAVE_B] = true;

			/* Get age of newest frame from node.
			 * At least one time_in is OK here; nodes get pruned
			 * long before both time_ins can get stale
			 */
			timestamp = time_a;
			if (node->time_in_stale[HSR_PT_SLAVE_A] ||
			    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
			     time_after(time_b, time_a)))
				timestamp = time_b;

			/* Warn of ring error only as long as we get
			 * frames at all
			 */
			if (time_is_after_jiffies(timestamp +
						  msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
				rcu_read_lock();
				port = get_late_port(hsr, node);
				if (port)
					hsr_nl_ringerror(hsr,
							 node->macaddress_A,
							 port);
				rcu_read_unlock();
			}

			/* Prune old entries */
			if (time_is_before_jiffies(timestamp +
						   msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
				hsr_nl_nodedown(hsr, node->macaddress_A);
				hlist_del_rcu(&node->mac_list);
				/* Note that we need to free this
				 * entry later:
				 */
				kfree_rcu(node, rcu_head);
			}
		}
	}
	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}
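/* Setup sketch (assumption about the device init code, not part of this
 * file): hsr_prune_nodes() is a self-rearming timer callback, so the owner is
 * expected to arm it once at setup and stop it at teardown, roughly:
 *
 *	timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
 *	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
 *	...
 *	del_timer_sync(&hsr->prune_timer);	// on device unregister
 */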

void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
			unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;
	u32 hash;

	hash = hsr_mac_hash(hsr, addr);

	if (!_pos) {
		node = hsr_node_get_first(&hsr->node_db[hash],
					  &hsr->list_lock);
		if (node)
			ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	node = _pos;
	hlist_for_each_entry_continue_rcu(node, mac_list) {
		ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	return NULL;
}

int hsr_get_node_data(struct hsr_priv *hsr,
		      const unsigned char *addr,
		      unsigned char addr_b[ETH_ALEN],
		      unsigned int *addr_b_ifindex,
		      int *if1_age,
		      u16 *if1_seq,
		      int *if2_age,
		      u16 *if2_seq)
{
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long tdiff;
	u32 hash;

	hash = hsr_mac_hash(hsr, addr);

	node = find_node_by_addr_A(&hsr->node_db[hash], addr);
	if (!node)
		return -ENOENT;

	ether_addr_copy(addr_b, node->macaddress_B);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if1_age = INT_MAX;
#endif
	else
		*if1_age = jiffies_to_msecs(tdiff);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if2_age = INT_MAX;
#endif
	else
		*if2_age = jiffies_to_msecs(tdiff);

	/* Present sequence numbers as if they were incoming on the interface */
	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];

	if (node->addr_B_port != HSR_PT_NONE) {
		port = hsr_port_get_hsr(hsr, node->addr_B_port);
		*addr_b_ifindex = port->dev->ifindex;
	} else {
		*addr_b_ifindex = -1;
	}

	return 0;
}