cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

neighbour.h (17210B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef _NET_NEIGHBOUR_H
      3#define _NET_NEIGHBOUR_H
      4
      5#include <linux/neighbour.h>
      6
      7/*
      8 *	Generic neighbour manipulation
      9 *
     10 *	Authors:
     11 *	Pedro Roque		<roque@di.fc.ul.pt>
     12 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
     13 *
     14 * 	Changes:
     15 *
     16 *	Harald Welte:		<laforge@gnumonks.org>
     17 *		- Add neighbour cache statistics like rtstat
     18 */
     19
     20#include <linux/atomic.h>
     21#include <linux/refcount.h>
     22#include <linux/netdevice.h>
     23#include <linux/skbuff.h>
     24#include <linux/rcupdate.h>
     25#include <linux/seq_file.h>
     26#include <linux/bitmap.h>
     27
     28#include <linux/err.h>
     29#include <linux/sysctl.h>
     30#include <linux/workqueue.h>
     31#include <net/rtnetlink.h>
     32
     33/*
     34 * NUD stands for "neighbor unreachability detection"
     35 */
     36
     37#define NUD_IN_TIMER	(NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
     38#define NUD_VALID	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
     39#define NUD_CONNECTED	(NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
     40
     41struct neighbour;
     42
/* Indices into neigh_parms.data[].
 *
 * The first NEIGH_VAR_DATA_MAX entries are real per-parms storage slots.
 * The entries after NEIGH_VAR_DATA_MAX are aliases that provide a second
 * (differently-scaled) sysctl view onto one of the real slots, and the
 * final GC_* values apply to a table's "default" parms only.
 * Do not reorder: NEIGH_VAR_DATA_MAX is anchored to NEIGH_VAR_LOCKTIME.
 */
enum {
	NEIGH_VAR_MCAST_PROBES,
	NEIGH_VAR_UCAST_PROBES,
	NEIGH_VAR_APP_PROBES,
	NEIGH_VAR_MCAST_REPROBES,
	NEIGH_VAR_RETRANS_TIME,
	NEIGH_VAR_BASE_REACHABLE_TIME,
	NEIGH_VAR_DELAY_PROBE_TIME,
	NEIGH_VAR_GC_STALETIME,
	NEIGH_VAR_QUEUE_LEN_BYTES,
	NEIGH_VAR_PROXY_QLEN,
	NEIGH_VAR_ANYCAST_DELAY,
	NEIGH_VAR_PROXY_DELAY,
	NEIGH_VAR_LOCKTIME,
#define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
	/* Following are used as a second way to access one of the above */
	NEIGH_VAR_QUEUE_LEN, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
	NEIGH_VAR_RETRANS_TIME_MS, /* same data as NEIGH_VAR_RETRANS_TIME */
	NEIGH_VAR_BASE_REACHABLE_TIME_MS, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
	/* Following are used by "default" only */
	NEIGH_VAR_GC_INTERVAL,
	NEIGH_VAR_GC_THRESH1,
	NEIGH_VAR_GC_THRESH2,
	NEIGH_VAR_GC_THRESH3,
	NEIGH_VAR_MAX
};
     69
/* Tunable parameters for one device (or a table's defaults).
 * Refcounted; freed via RCU once the last reference is dropped.
 */
struct neigh_parms {
	possible_net_t net;		/* owning network namespace */
	struct net_device *dev;		/* device these parms apply to (NULL for defaults) */
	netdevice_tracker dev_tracker;
	struct list_head list;		/* linkage on tbl->parms_list */
	int	(*neigh_setup)(struct neighbour *);	/* driver hook run at entry creation */
	struct neigh_table *tbl;	/* back-pointer to owning table */

	void	*sysctl_table;

	int dead;			/* set when being torn down; blocks cloning */
	refcount_t refcnt;
	struct rcu_head rcu_head;

	int	reachable_time;		/* randomized from BASE_REACHABLE_TIME */
	int	data[NEIGH_VAR_DATA_MAX];	/* values indexed by NEIGH_VAR_* */
	/* one bit per data[] slot: set when the value was explicitly configured */
	DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX);
};
     88
/* Store @val in p->data[@index], marking the slot as explicitly set so it
 * is no longer treated as inheriting the default.
 */
static inline void neigh_var_set(struct neigh_parms *p, int index, int val)
{
	/* mark before storing so the slot is never "set but unmarked" */
	set_bit(index, p->data_state);
	p->data[index] = val;
}
     94
/* Raw accessor for a parms slot by NEIGH_VAR_* suffix. */
#define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])

/* In ndo_neigh_setup, NEIGH_VAR_INIT should be used: it writes the slot
 * without marking it as explicitly configured.
 * In other cases, NEIGH_VAR_SET should be used (marks the slot via
 * neigh_var_set()).
 */
#define NEIGH_VAR_INIT(p, attr, val) (NEIGH_VAR(p, attr) = val)
#define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
    102
/* Mark every parms slot as explicitly configured. */
static inline void neigh_parms_data_state_setall(struct neigh_parms *p)
{
	bitmap_fill(p->data_state, NEIGH_VAR_DATA_MAX);
}
    107
/* Mark every parms slot as unset (i.e. inheriting defaults). */
static inline void neigh_parms_data_state_cleanall(struct neigh_parms *p)
{
	bitmap_zero(p->data_state, NEIGH_VAR_DATA_MAX);
}
    112
/* Per-CPU neighbour table statistics (see tbl->stats); updated via
 * NEIGH_CACHE_STAT_INC below.
 */
struct neigh_statistics {
	unsigned long allocs;		/* number of allocated neighs */
	unsigned long destroys;		/* number of destroyed neighs */
	unsigned long hash_grows;	/* number of hash resizes */

	unsigned long res_failed;	/* number of failed resolutions */

	unsigned long lookups;		/* number of lookups */
	unsigned long hits;		/* number of hits (among lookups) */

	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */

	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
	unsigned long forced_gc_runs;	/* number of forced GC runs */

	unsigned long unres_discards;	/* number of unresolved drops */
	unsigned long table_fulls;      /* times even gc couldn't help */
};

/* Bump one statistics field on the local CPU. */
#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
    134
    135struct neighbour {
    136	struct neighbour __rcu	*next;
    137	struct neigh_table	*tbl;
    138	struct neigh_parms	*parms;
    139	unsigned long		confirmed;
    140	unsigned long		updated;
    141	rwlock_t		lock;
    142	refcount_t		refcnt;
    143	unsigned int		arp_queue_len_bytes;
    144	struct sk_buff_head	arp_queue;
    145	struct timer_list	timer;
    146	unsigned long		used;
    147	atomic_t		probes;
    148	u8			nud_state;
    149	u8			type;
    150	u8			dead;
    151	u8			protocol;
    152	u32			flags;
    153	seqlock_t		ha_lock;
    154	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
    155	struct hh_cache		hh;
    156	int			(*output)(struct neighbour *, struct sk_buff *);
    157	const struct neigh_ops	*ops;
    158	struct list_head	gc_list;
    159	struct list_head	managed_list;
    160	struct rcu_head		rcu;
    161	struct net_device	*dev;
    162	netdevice_tracker	dev_tracker;
    163	u8			primary_key[0];
    164} __randomize_layout;
    165
/* Per-family operations bound to a neighbour entry. */
struct neigh_ops {
	int			family;		/* AF_* this ops table serves */
	void			(*solicit)(struct neighbour *, struct sk_buff *);	/* send a probe */
	void			(*error_report)(struct neighbour *, struct sk_buff *);	/* resolution failed */
	int			(*output)(struct neighbour *, struct sk_buff *);	/* slow/resolving path */
	int			(*connected_output)(struct neighbour *, struct sk_buff *);	/* entry is NUD_CONNECTED */
};
    173
/* Proxy neighbour entry: an address we answer on behalf of (see the
 * phash_buckets table and pneigh_lookup()).
 */
struct pneigh_entry {
	struct pneigh_entry	*next;		/* hash-bucket chain */
	possible_net_t		net;		/* owning network namespace */
	struct net_device	*dev;		/* bound device (may be NULL) */
	netdevice_tracker	dev_tracker;
	u32			flags;
	u8			protocol;
	u8			key[];		/* protocol address, tbl->key_len bytes */
};
    183
    184/*
    185 *	neighbour table manipulation
    186 */
    187
/* Number of per-table hash seeds available to tbl->hash(). */
#define NEIGH_NUM_HASH_RND	4

/* RCU-managed resizable hash table of neighbour entries. */
struct neigh_hash_table {
	struct neighbour __rcu	**hash_buckets;	/* 1 << hash_shift buckets */
	unsigned int		hash_shift;
	__u32			hash_rnd[NEIGH_NUM_HASH_RND];	/* hash seeds */
	struct rcu_head		rcu;
};
    196
    197
/* One neighbour table (e.g. arp_tbl, nd_tbl): hash table plus per-family
 * callbacks, default parms, GC bookkeeping and proxy state.
 */
struct neigh_table {
	int			family;		/* AF_* served by this table */
	unsigned int		entry_size;	/* bytes per entry incl. key (see neighbour_priv()) */
	unsigned int		key_len;	/* protocol address length in bytes */
	__be16			protocol;
	__u32			(*hash)(const void *pkey,
					const struct net_device *dev,
					__u32 *hash_rnd);	/* bucket hash for (pkey, dev) */
	bool			(*key_eq)(const struct neighbour *, const void *pkey);
	int			(*constructor)(struct neighbour *);	/* per-family entry init */
	int			(*pconstructor)(struct pneigh_entry *);	/* proxy entry init */
	void			(*pdestructor)(struct pneigh_entry *);
	void			(*proxy_redo)(struct sk_buff *skb);	/* re-handle delayed proxy skb */
	int			(*is_multicast)(const void *pkey);
	bool			(*allow_add)(const struct net_device *dev,
					     struct netlink_ext_ack *extack);	/* veto userspace adds */
	char			*id;		/* table name */
	struct neigh_parms	parms;		/* default parms, head of parms_list */
	struct list_head	parms_list;
	int			gc_interval;
	int			gc_thresh1;	/* below this, no GC pressure */
	int			gc_thresh2;	/* soft limit */
	int			gc_thresh3;	/* hard limit */
	unsigned long		last_flush;	/* jiffies of last forced GC */
	struct delayed_work	gc_work;
	struct delayed_work	managed_work;	/* periodic probe of managed entries */
	struct timer_list 	proxy_timer;
	struct sk_buff_head	proxy_queue;	/* skbs delayed for proxy handling */
	atomic_t		entries;
	atomic_t		gc_entries;
	struct list_head	gc_list;
	struct list_head	managed_list;
	rwlock_t		lock;
	unsigned long		last_rand;	/* jiffies when reachable_time was re-randomized */
	struct neigh_statistics	__percpu *stats;
	struct neigh_hash_table __rcu *nht;
	struct pneigh_entry	**phash_buckets;	/* proxy entry hash */
};
    236
/* Global table indices for neigh_table_init()/neigh_table_clear(). */
enum {
	NEIGH_ARP_TABLE = 0,	/* IPv4 ARP */
	NEIGH_ND_TABLE = 1,	/* IPv6 neighbour discovery */
	NEIGH_DN_TABLE = 2,	/* DECnet */
	NEIGH_NR_TABLES,
	NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */
};
    244
    245static inline int neigh_parms_family(struct neigh_parms *p)
    246{
    247	return p->tbl->family;
    248}
    249
/* Alignment for driver-private data appended after a neighbour entry. */
#define NEIGH_PRIV_ALIGN	sizeof(long long)
#define NEIGH_ENTRY_SIZE(size)	ALIGN((size), NEIGH_PRIV_ALIGN)

/* Driver-private area following the table's fixed entry_size bytes. */
static inline void *neighbour_priv(const struct neighbour *n)
{
	return (char *)n + n->tbl->entry_size;
}
    257
/* flags for neigh_update() */
#define NEIGH_UPDATE_F_OVERRIDE			BIT(0)	/* may replace lladdr/state */
#define NEIGH_UPDATE_F_WEAK_OVERRIDE		BIT(1)	/* lladdr change downgrades to NUD_STALE */
#define NEIGH_UPDATE_F_OVERRIDE_ISROUTER	BIT(2)	/* may clear the router flag */
#define NEIGH_UPDATE_F_USE			BIT(3)
#define NEIGH_UPDATE_F_MANAGED			BIT(4)
#define NEIGH_UPDATE_F_EXT_LEARNED		BIT(5)
#define NEIGH_UPDATE_F_ISROUTER			BIT(6)	/* neighbour is a router */
#define NEIGH_UPDATE_F_ADMIN			BIT(7)	/* administrative change */

/* In-kernel representation for NDA_FLAGS_EXT flags: the extended netlink
 * flags are shifted above the original 8-bit NTF_* space.
 */
#define NTF_OLD_MASK		0xff
#define NTF_EXT_SHIFT		8
#define NTF_EXT_MASK		(NTF_EXT_MANAGED)

#define NTF_MANAGED		(NTF_EXT_MANAGED << NTF_EXT_SHIFT)

/* Netlink attribute policy for neighbour messages (defined in core). */
extern const struct nla_policy nda_policy[];
    276
    277static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
    278{
    279	return *(const u16 *)n->primary_key == *(const u16 *)pkey;
    280}
    281
    282static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey)
    283{
    284	return *(const u32 *)n->primary_key == *(const u32 *)pkey;
    285}
    286
    287static inline bool neigh_key_eq128(const struct neighbour *n, const void *pkey)
    288{
    289	const u32 *n32 = (const u32 *)n->primary_key;
    290	const u32 *p32 = pkey;
    291
    292	return ((n32[0] ^ p32[0]) | (n32[1] ^ p32[1]) |
    293		(n32[2] ^ p32[2]) | (n32[3] ^ p32[3])) == 0;
    294}
    295
/* Core lockless lookup: walk the RCU hash chain for (pkey, dev) using the
 * supplied hash and comparison callbacks. Returns the entry WITHOUT taking
 * a reference; the caller must be in an RCU-BH read-side critical section
 * and must not use the result past it.
 */
static inline struct neighbour *___neigh_lookup_noref(
	struct neigh_table *tbl,
	bool (*key_eq)(const struct neighbour *n, const void *pkey),
	__u32 (*hash)(const void *pkey,
		      const struct net_device *dev,
		      __u32 *hash_rnd),
	const void *pkey,
	struct net_device *dev)
{
	struct neigh_hash_table *nht = rcu_dereference_bh(tbl->nht);
	struct neighbour *n;
	u32 hash_val;

	/* top hash_shift bits of the 32-bit hash select the bucket */
	hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (n->dev == dev && key_eq(n, pkey))
			return n;
	}

	return NULL;
}
    319
/* Lockless lookup using the table's own hash/key_eq callbacks.
 * Same RCU-BH and no-reference caveats as ___neigh_lookup_noref().
 */
static inline struct neighbour *__neigh_lookup_noref(struct neigh_table *tbl,
						     const void *pkey,
						     struct net_device *dev)
{
	return ___neigh_lookup_noref(tbl, tbl->key_eq, tbl->hash, pkey, dev);
}
    326
/* Record reachability confirmation for @n (NULL is a no-op). Uses
 * READ_ONCE/WRITE_ONCE because ->confirmed is read and written locklessly
 * from multiple contexts.
 */
static inline void neigh_confirm(struct neighbour *n)
{
	if (n) {
		unsigned long now = jiffies;

		/* avoid dirtying neighbour */
		if (READ_ONCE(n->confirmed) != now)
			WRITE_ONCE(n->confirmed, now);
	}
}
    337
/* Table lifetime and entry lookup/creation (implemented in core). */
void neigh_table_init(int index, struct neigh_table *tbl);
int neigh_table_clear(int index, struct neigh_table *tbl);
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref);

/* Create an entry for @pkey on @dev, taking a reference (want_ref=true).
 * Returns ERR_PTR() on failure, like __neigh_create().
 */
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
					     const void *pkey,
					     struct net_device *dev)
{
	return __neigh_create(tbl, pkey, dev, true);
}

/* Entry teardown, state transitions and device events. */
void neigh_destroy(struct neighbour *neigh);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
		       const bool immediate_ok);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
		 u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev);

/* Output methods installed as neighbour->output. */
int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
						u8 *lladdr, void *saddr,
						struct net_device *dev);

/* Per-device parms allocation/release. */
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl);
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
    372
/* Network namespace owning @parms. */
static inline
struct net *neigh_parms_net(const struct neigh_parms *parms)
{
	return read_pnet(&parms->net);
}
    378
/* Randomize a reachable-time value around @base. */
unsigned long neigh_rand_reach_time(unsigned long base);

/* Proxy neighbour handling: delayed queueing, lookup (optionally creating
 * when @creat is non-zero) and deletion.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb);
struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
				   const void *key, struct net_device *dev,
				   int creat);
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
				     const void *key, struct net_device *dev);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
		  struct net_device *dev);
    390
/* Network namespace owning @pneigh. */
static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
	return read_pnet(&pneigh->net);
}
    395
/* Notify userspace resolvers about @n. */
void neigh_app_ns(struct neighbour *n);
/* Iterate all entries, invoking @cb with @cookie for each. */
void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie);
/* Iterate entries, releasing those for which @cb returns non-zero. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *));
/* Transmit @skb resolving via family @fam (NEIGH_LINK_TABLE for raw). */
int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
/* Iterate all proxy entries. */
void pneigh_for_each(struct neigh_table *tbl,
		     void (*cb)(struct pneigh_entry *));
    404
/* Iterator state for /proc seq_file dumps of a neighbour table. */
struct neigh_seq_state {
	struct seq_net_private p;
	struct neigh_table *tbl;
	struct neigh_hash_table *nht;
	/* optional per-entry sub-iterator (e.g. for per-neigh lists) */
	void *(*neigh_sub_iter)(struct neigh_seq_state *state,
				struct neighbour *n, loff_t *pos);
	unsigned int bucket;		/* current hash bucket */
	unsigned int flags;		/* NEIGH_SEQ_* below */
#define NEIGH_SEQ_NEIGH_ONLY	0x00000001	/* skip proxy entries */
#define NEIGH_SEQ_IS_PNEIGH	0x00000002	/* currently in proxy table */
#define NEIGH_SEQ_SKIP_NOARP	0x00000004	/* skip NUD_NOARP entries */
};
/* seq_file start/next/stop helpers for neighbour table dumps. */
void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
		      unsigned int);
void *neigh_seq_next(struct seq_file *, void *, loff_t *);
void neigh_seq_stop(struct seq_file *, void *);
    421
/* sysctl handlers for neighbour parameters (plain int, jiffies and
 * millisecond-scaled jiffies variants).
 */
int neigh_proc_dointvec(struct ctl_table *ctl, int write,
			void *buffer, size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
				void *buffer,
				size_t *lenp, loff_t *ppos);
int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void *buffer, size_t *lenp, loff_t *ppos);

/* Register/unregister the per-parms sysctl tree. */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *proc_handler);
void neigh_sysctl_unregister(struct neigh_parms *p);
    433
/* Drop a parms reference without triggering destruction here; the final
 * teardown is handled elsewhere (hence the "__" prefix).
 */
static inline void __neigh_parms_put(struct neigh_parms *parms)
{
	refcount_dec(&parms->refcnt);
}
    438
/* Take an extra reference on @parms and return it. */
static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
{
	refcount_inc(&parms->refcnt);
	return parms;
}
    444
    445/*
    446 *	Neighbour references
    447 */
    448
/* Drop a reference on @neigh, destroying it when the count hits zero. */
static inline void neigh_release(struct neighbour *neigh)
{
	if (refcount_dec_and_test(&neigh->refcnt))
		neigh_destroy(neigh);
}
    454
    455static inline struct neighbour * neigh_clone(struct neighbour *neigh)
    456{
    457	if (neigh)
    458		refcount_inc(&neigh->refcnt);
    459	return neigh;
    460}
    461
    462#define neigh_hold(n)	refcount_inc(&(n)->refcnt)
    463
    464static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
    465						  struct sk_buff *skb,
    466						  const bool immediate_ok)
    467{
    468	unsigned long now = jiffies;
    469
    470	if (READ_ONCE(neigh->used) != now)
    471		WRITE_ONCE(neigh->used, now);
    472	if (!(neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
    473		return __neigh_event_send(neigh, skb, immediate_ok);
    474	return 0;
    475}
    476
/* Convenience wrapper for neigh_event_send_probe() with immediate_ok. */
static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	return neigh_event_send_probe(neigh, skb, true);
}
    481
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* Copy the cached Ethernet source+proto portion of the hardware header in
 * front of skb->data, retrying under the hh_lock seqlock until a
 * consistent snapshot is read. Always returns 0.
 */
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int seq, hh_alen;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(ETH_HLEN);
		/* skip the destination MAC: copy only the trailing
		 * ETH_ALEN + padding bytes of the aligned header
		 */
		memcpy(skb->data - hh_alen, hh->hh_data, ETH_ALEN + hh_alen - ETH_HLEN);
	} while (read_seqretry(&hh->hh_lock, seq));
	return 0;
}
#endif
    495
/* Prepend the cached hardware header to @skb and transmit it.
 * The header is copied under the hh_lock seqlock so a concurrent update
 * cannot yield a torn header; on insufficient headroom the skb is freed
 * and NET_XMIT_DROP returned.
 */
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int hh_alen = 0;
	unsigned int seq;
	unsigned int hh_len;

	do {
		seq = read_seqbegin(&hh->hh_lock);
		hh_len = READ_ONCE(hh->hh_len);
		if (likely(hh_len <= HH_DATA_MOD)) {
			hh_alen = HH_DATA_MOD;

			/* skb_push() would proceed silently if we have room for
			 * the unaligned size but not for the aligned size:
			 * check headroom explicitly.
			 */
			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
				/* this is inlined by gcc */
				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
				       HH_DATA_MOD);
			}
		} else {
			hh_alen = HH_DATA_ALIGN(hh_len);

			if (likely(skb_headroom(skb) >= hh_alen)) {
				memcpy(skb->data - hh_alen, hh->hh_data,
				       hh_alen);
			}
		}
	} while (read_seqretry(&hh->hh_lock, seq));

	/* headroom may have been too small on every iteration */
	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	__skb_push(skb, hh_len);
	return dev_queue_xmit(skb);
}
    535
/* Transmit @skb via @n: use the cached hardware header fast path when the
 * entry is connected and a header is cached (unless @skip_cache), else
 * fall back to the entry's output method.
 */
static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
			       bool skip_cache)
{
	const struct hh_cache *hh = &n->hh;

	/* n->nud_state and hh->hh_len could be changed under us.
	 * neigh_hh_output() is taking care of the race later.
	 */
	if (!skip_cache &&
	    (READ_ONCE(n->nud_state) & NUD_CONNECTED) &&
	    READ_ONCE(hh->hh_len))
		return neigh_hh_output(hh, skb);

	return n->output(n, skb);
}
    551
    552static inline struct neighbour *
    553__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
    554{
    555	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
    556
    557	if (n || !creat)
    558		return n;
    559
    560	n = neigh_create(tbl, pkey, dev);
    561	return IS_ERR(n) ? NULL : n;
    562}
    563
    564static inline struct neighbour *
    565__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
    566  struct net_device *dev)
    567{
    568	struct neighbour *n = neigh_lookup(tbl, pkey, dev);
    569
    570	if (n)
    571		return n;
    572
    573	return neigh_create(tbl, pkey, dev);
    574}
    575
/* Per-skb control-block state used while an skb sits on a neighbour
 * queue; overlaid on skb->cb via NEIGH_CB().
 */
struct neighbour_cb {
	unsigned long sched_next;	/* jiffies when to process next */
	unsigned int flags;		/* LOCALLY_ENQUEUED */
};

#define LOCALLY_ENQUEUED 0x1

#define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
    584
/* Copy a consistent snapshot of @n's hardware address (dev->addr_len
 * bytes) into @dst, retrying under the ha_lock seqlock if a concurrent
 * update raced with the copy.
 */
static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
				     const struct net_device *dev)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&n->ha_lock);
		memcpy(dst, n->ha, dev->addr_len);
	} while (read_seqretry(&n->ha_lock, seq));
}
    595
    596static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
    597					  int *notify)
    598{
    599	u8 ndm_flags = 0;
    600
    601	ndm_flags |= (flags & NEIGH_UPDATE_F_ISROUTER) ? NTF_ROUTER : 0;
    602	if ((neigh->flags ^ ndm_flags) & NTF_ROUTER) {
    603		if (ndm_flags & NTF_ROUTER)
    604			neigh->flags |= NTF_ROUTER;
    605		else
    606			neigh->flags &= ~NTF_ROUTER;
    607		*notify = 1;
    608	}
    609}
    610#endif