cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tcp_metrics.c (27486B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp. option (once) */
	unsigned long	last_syn_loss;	/* Last Fast Open SYN loss */
	struct	tcp_fastopen_cookie	cookie;
};

/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility.
 * The kernel only stores RTT and RTTVAR in usec resolution.
 */
#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)

struct tcp_metrics_block {
	struct tcp_metrics_block __rcu	*tcpm_next;
	possible_net_t			tcpm_net;
	struct inetpeer_addr		tcpm_saddr;
	struct inetpeer_addr		tcpm_daddr;
	unsigned long			tcpm_stamp;
	u32				tcpm_lock;
	u32				tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
	struct tcp_fastopen_metrics	tcpm_fastopen;

	struct rcu_head			rcu_head;
};

static inline struct net *tm_net(struct tcp_metrics_block *tm)
{
	return read_pnet(&tm->tcpm_net);
}

static bool tcp_metric_locked(struct tcp_metrics_block *tm,
			      enum tcp_metric_index idx)
{
	return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
			  enum tcp_metric_index idx)
{
	return tm->tcpm_vals[idx];
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
			   enum tcp_metric_index idx,
			   u32 val)
{
	tm->tcpm_vals[idx] = val;
}

static bool addr_same(const struct inetpeer_addr *a,
		      const struct inetpeer_addr *b)
{
	return inetpeer_addr_cmp(a, b) == 0;
}

struct tcpm_hash_bucket {
	struct tcp_metrics_block __rcu	*chain;
};

static struct tcpm_hash_bucket	*tcp_metrics_hash __read_mostly;
static unsigned int		tcp_metrics_hash_log __read_mostly;

static DEFINE_SPINLOCK(tcp_metrics_lock);
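
/* Locking: readers walk the per-bucket chains under RCU, while all
 * chain mutations (insert, reclaim, flush, netlink delete) are
 * serialized by tcp_metrics_lock, taken with BH disabled.
 */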

static void tcpm_suck_dst(struct tcp_metrics_block *tm,
			  const struct dst_entry *dst,
			  bool fastopen_clear)
{
	u32 msval;
	u32 val;

	tm->tcpm_stamp = jiffies;

	val = 0;
	if (dst_metric_locked(dst, RTAX_RTT))
		val |= 1 << TCP_METRIC_RTT;
	if (dst_metric_locked(dst, RTAX_RTTVAR))
		val |= 1 << TCP_METRIC_RTTVAR;
	if (dst_metric_locked(dst, RTAX_SSTHRESH))
		val |= 1 << TCP_METRIC_SSTHRESH;
	if (dst_metric_locked(dst, RTAX_CWND))
		val |= 1 << TCP_METRIC_CWND;
	if (dst_metric_locked(dst, RTAX_REORDERING))
		val |= 1 << TCP_METRIC_REORDERING;
	tm->tcpm_lock = val;

	msval = dst_metric_raw(dst, RTAX_RTT);
	tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;

	msval = dst_metric_raw(dst, RTAX_RTTVAR);
	tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
	tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
	tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
	tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
	if (fastopen_clear) {
		tm->tcpm_fastopen.mss = 0;
		tm->tcpm_fastopen.syn_loss = 0;
		tm->tcpm_fastopen.try_exp = 0;
		tm->tcpm_fastopen.cookie.exp = false;
		tm->tcpm_fastopen.cookie.len = 0;
	}
}
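
/* Example: route metrics keep RTT at msec granularity, so a cached dst
 * RTAX_RTT of 100 is stored above as 100 * USEC_PER_MSEC = 100000 usec
 * in tcpm_vals[TCP_METRIC_RTT].
 */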

#define TCP_METRICS_TIMEOUT		(60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
	if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
		tcpm_suck_dst(tm, dst, false);
}

#define TCP_METRICS_RECLAIM_DEPTH	5
#define TCP_METRICS_RECLAIM_PTR		(struct tcp_metrics_block *) 0x1UL
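
/* __tcp_get_metrics() returns TCP_METRICS_RECLAIM_PTR instead of NULL
 * when the entry is absent but the chain is already deeper than
 * TCP_METRICS_RECLAIM_DEPTH; tcpm_new() then recycles the oldest entry
 * in the bucket rather than growing the chain.
 */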

#define deref_locked(p)	\
	rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock))

static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *saddr,
					  struct inetpeer_addr *daddr,
					  unsigned int hash)
{
	struct tcp_metrics_block *tm;
	struct net *net;
	bool reclaim = false;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);

	/* While waiting for the spin-lock the cache might have been populated
	 * with this entry and so we have to check again.
	 */
	tm = __tcp_get_metrics(saddr, daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR) {
		reclaim = true;
		tm = NULL;
	}
	if (tm) {
		tcpm_check_stamp(tm, dst);
		goto out_unlock;
	}

	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = deref_locked(tcp_metrics_hash[hash].chain);
		for (tm = deref_locked(oldest->tcpm_next); tm;
		     tm = deref_locked(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	write_pnet(&tm->tcpm_net, net);
	tm->tcpm_saddr = *saddr;
	tm->tcpm_daddr = *daddr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
	if (tm)
		return tm;
	if (depth > TCP_METRICS_RECLAIM_DEPTH)
		return TCP_METRICS_RECLAIM_PTR;
	return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash)
{
	struct tcp_metrics_block *tm;
	int depth = 0;

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, saddr) &&
		    addr_same(&tm->tcpm_daddr, daddr) &&
		    net_eq(tm_net(tm), net))
			break;
		depth++;
	}
	return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
						       struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	saddr.family = req->rsk_ops->family;
	daddr.family = req->rsk_ops->family;
	switch (daddr.family) {
	case AF_INET:
		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
		break;
#endif
	default:
		return NULL;
	}

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_saddr, &saddr) &&
		    addr_same(&tm->tcpm_daddr, &daddr) &&
		    net_eq(tm_net(tm), net))
			break;
	}
	tcpm_check_stamp(tm, dst);
	return tm;
}
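
/* Bucket selection in each lookup path follows the same recipe: hash
 * the remote address, mix in the netns with net_hash_mix(), then fold
 * the result to tcp_metrics_hash_log bits with hash_32().
 */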

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
						 struct dst_entry *dst,
						 bool create)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net;

	if (sk->sk_family == AF_INET) {
		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (sk->sk_family == AF_INET6) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
		} else {
			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
		}
	}
#endif
	else
		return NULL;

	net = dev_net(dst->dev);
	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);

	tm = __tcp_get_metrics(&saddr, &daddr, net, hash);
	if (tm == TCP_METRICS_RECLAIM_PTR)
		tm = NULL;
	if (!tm && create)
		tm = tcpm_new(dst, &saddr, &daddr, hash);
	else
		tcpm_check_stamp(tm, dst);

	return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	unsigned long rtt;
	u32 val;
	int m;

	sk_dst_confirm(sk);
	if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
		return;

	rcu_read_lock();
	if (icsk->icsk_backoff || !tp->srtt_us) {
		/* This session failed to estimate rtt. Why?
		 * Probably no packets returned in time.  Reset our
		 * results.
		 */
		tm = tcp_get_metrics(sk, dst, false);
		if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
			tcp_metric_set(tm, TCP_METRIC_RTT, 0);
		goto out_unlock;
	} else
		tm = tcp_get_metrics(sk, dst, true);

	if (!tm)
		goto out_unlock;

	rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	m = rtt - tp->srtt_us;

	/* If the newly calculated rtt is larger than the stored one,
	 * store the new one. Otherwise, use EWMA. Remember, rtt
	 * overestimation is always better than underestimation.
	 */
	if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
		if (m <= 0)
			rtt = tp->srtt_us;
		else
			rtt -= (m >> 3);
		tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
	}
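
	/* Example of the 1/8 EWMA above: with a cached RTT of 120000 usec
	 * and srtt_us of 80000, m = 40000, so the cached value becomes
	 * 120000 - (40000 >> 3) = 115000 usec; a larger new sample
	 * instead replaces the cached value outright.
	 */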

	if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
		unsigned long var;

		if (m < 0)
			m = -m;

		/* Scale deviation to rttvar fixed point */
		m >>= 1;
		if (m < tp->mdev_us)
			m = tp->mdev_us;

		var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
		if (m >= var)
			var = m;
		else
			var -= (var - m) >> 2;

		tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
	}

	if (tcp_in_initial_slowstart(tp)) {
		/* Slow start still did not finish. */
		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && (tcp_snd_cwnd(tp) >> 1) > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tcp_snd_cwnd(tp) >> 1);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			if (tcp_snd_cwnd(tp) > val)
				tcp_metric_set(tm, TCP_METRIC_CWND,
					       tcp_snd_cwnd(tp));
		}
	} else if (!tcp_in_slow_start(tp) &&
		   icsk->icsk_ca_state == TCP_CA_Open) {
		/* Cong. avoidance phase, cwnd is reliable. */
		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
				       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
		}
	} else {
		/* Else slow start did not finish, cwnd is nonsense and
		 * ssthresh may also be invalid.
		 */
		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
			val = tcp_metric_get(tm, TCP_METRIC_CWND);
			tcp_metric_set(tm, TCP_METRIC_CWND,
				       (val + tp->snd_ssthresh) >> 1);
		}
		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
			if (val && tp->snd_ssthresh > val)
				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
					       tp->snd_ssthresh);
		}
		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
			if (val < tp->reordering &&
			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
				tcp_metric_set(tm, TCP_METRIC_REORDERING,
					       tp->reordering);
		}
	}
	tm->tcpm_stamp = jiffies;
out_unlock:
	rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct tcp_metrics_block *tm;
	u32 val, crtt = 0; /* cached RTT scaled by 8 */

	sk_dst_confirm(sk);
	if (!dst)
		goto reset;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (!tm) {
		rcu_read_unlock();
		goto reset;
	}

	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

	val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
	      0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
	if (val) {
		tp->snd_ssthresh = val;
		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
			tp->snd_ssthresh = tp->snd_cwnd_clamp;
	} else {
		/* ssthresh may have been reduced unnecessarily during
		 * 3WHS. Restore it to its initial default.
		 */
		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	}
	val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
	if (val && tp->reordering != val)
		tp->reordering = val;

	crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
	rcu_read_unlock();
reset:
	/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
	 * for seeding the RTO for later data packets because SYN packets
	 * are small. Use the per-dst cached values to seed the RTO but
	 * keep the RTT estimator variables intact (e.g., srtt, mdev,
	 * rttvar). Later the RTO will be updated immediately upon
	 * obtaining the first data RTT sample (tcp_rtt_estimator()).
	 * Hence the cached RTT only influences the first RTO but not
	 * later RTT estimation.
	 *
	 * But if RTT is not available from the SYN (due to retransmits or
	 * syn cookies) or the cache, force a conservative 3secs timeout.
	 *
	 * A bit of theory. RTT is the time that passes after a "normal"
	 * sized packet is sent until it is ACKed. In normal circumstances
	 * sending small packets forces the peer to delay ACKs and the
	 * calculation is correct too. The algorithm is adaptive and,
	 * provided we follow specs, it NEVER underestimates RTT. BUT! If
	 * a peer plays clever tricks such as "quick acks" for long enough
	 * to push the RTT down to a low value, and then abruptly stops
	 * doing so and starts to delay ACKs, expect trouble.
	 */
	if (crtt > tp->srtt_us) {
		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
		crtt /= 8 * USEC_PER_SEC / HZ;
		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
	} else if (tp->srtt_us == 0) {
		/* RFC6298 5.7: We've failed to get a valid RTT sample from
		 * the 3WHS. This is most likely due to retransmission,
		 * including a spurious one. Reset the RTO back to 3secs
		 * from the more aggressive 1sec to avoid more spurious
		 * retransmission.
		 */
		tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
		tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;

		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
	}
}
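
/* Worked example for the RTO seeding above, assuming HZ=1000: a cached
 * 8-scaled RTT of 1600000 usec (true RTT 200 ms) becomes
 * 1600000 / (8 * USEC_PER_SEC / HZ) = 200 jiffies, so with the default
 * 200 ms rto_min the RTO is seeded as 200 + max(2 * 200, 200) = 600
 * jiffies, i.e. 600 ms.
 */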

bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
{
	struct tcp_metrics_block *tm;
	bool ret;

	if (!dst)
		return false;

	rcu_read_lock();
	tm = __tcp_get_metrics_req(req, dst);
	if (tm && tcp_metric_get(tm, TCP_METRIC_RTT))
		ret = true;
	else
		ret = false;
	rcu_read_unlock();

	return ret;
}

static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie)
{
	struct tcp_metrics_block *tm;

	rcu_read_lock();
	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			if (tfom->mss)
				*mss = tfom->mss;
			*cookie = tfom->cookie;
			if (cookie->len <= 0 && tfom->try_exp == 1)
				cookie->exp = true;
		} while (read_seqretry(&fastopen_seqlock, seq));
	}
	rcu_read_unlock();
}
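
/* The read_seqbegin()/read_seqretry() loop above lets readers take a
 * consistent snapshot of the fastopen fields without blocking writers:
 * if tcp_fastopen_cache_set() updates the block concurrently, the
 * sequence count changes and the read is retried.
 */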

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct tcp_metrics_block *tm;

	if (!dst)
		return;
	rcu_read_lock();
	tm = tcp_get_metrics(sk, dst, true);
	if (tm) {
		struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

		write_seqlock_bh(&fastopen_seqlock);
		if (mss)
			tfom->mss = mss;
		if (cookie && cookie->len > 0)
			tfom->cookie = *cookie;
		else if (try_exp > tfom->try_exp &&
			 tfom->cookie.len <= 0 && !tfom->cookie.exp)
			tfom->try_exp = try_exp;
		if (syn_lost) {
			++tfom->syn_loss;
			tfom->last_syn_loss = jiffies;
		} else
			tfom->syn_loss = 0;
		write_sequnlock_bh(&fastopen_seqlock);
	}
	rcu_read_unlock();
}

static struct genl_family tcp_metrics_nl_family;

static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
	[TCP_METRICS_ATTR_ADDR_IPV4]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_ADDR_IPV6]	= { .type = NLA_BINARY,
					    .len = sizeof(struct in6_addr), },
	/* The following attributes are not received for GET/DEL;
	 * we keep them for reference.
	 */
#if 0
	[TCP_METRICS_ATTR_AGE]		= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_TW_TSVAL]	= { .type = NLA_U32, },
	[TCP_METRICS_ATTR_TW_TS_STAMP]	= { .type = NLA_S32, },
	[TCP_METRICS_ATTR_VALS]		= { .type = NLA_NESTED, },
	[TCP_METRICS_ATTR_FOPEN_MSS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROPS]	= { .type = NLA_U16, },
	[TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]	= { .type = NLA_MSECS, },
	[TCP_METRICS_ATTR_FOPEN_COOKIE]	= { .type = NLA_BINARY,
					    .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
				 struct tcp_metrics_block *tm)
{
	struct nlattr *nest;
	int i;

	switch (tm->tcpm_daddr.family) {
	case AF_INET:
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	case AF_INET6:
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
			goto nla_put_failure;
		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
			goto nla_put_failure;
		break;
	default:
		return -EAFNOSUPPORT;
	}

	if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
			  jiffies - tm->tcpm_stamp,
			  TCP_METRICS_ATTR_PAD) < 0)
		goto nla_put_failure;

	{
		int n = 0;

		nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS);
		if (!nest)
			goto nla_put_failure;
		for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
			u32 val = tm->tcpm_vals[i];

			if (!val)
				continue;
			if (i == TCP_METRIC_RTT) {
				if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (i == TCP_METRIC_RTTVAR) {
				if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
						val) < 0)
					goto nla_put_failure;
				n++;
				val = max(val / 1000, 1U);
			}
			if (nla_put_u32(msg, i + 1, val) < 0)
				goto nla_put_failure;
			n++;
		}
		if (n)
			nla_nest_end(msg, nest);
		else
			nla_nest_cancel(msg, nest);
	}

	{
		struct tcp_fastopen_metrics tfom_copy[1], *tfom;
		unsigned int seq;

		do {
			seq = read_seqbegin(&fastopen_seqlock);
			tfom_copy[0] = tm->tcpm_fastopen;
		} while (read_seqretry(&fastopen_seqlock, seq));

		tfom = tfom_copy;
		if (tfom->mss &&
		    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
				tfom->mss) < 0)
			goto nla_put_failure;
		if (tfom->syn_loss &&
		    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
				tfom->syn_loss) < 0 ||
		     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
				jiffies - tfom->last_syn_loss,
				TCP_METRICS_ATTR_PAD) < 0))
			goto nla_put_failure;
		if (tfom->cookie.len > 0 &&
		    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
			    tfom->cookie.len, tfom->cookie.val) < 0)
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
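
/* Inside TCP_METRICS_ATTR_VALS the nested attribute type is the metric
 * index plus one, so type 0 stays unused. RTT and RTTVAR are emitted
 * twice: verbatim in usec (TCP_METRIC_RTT_US/RTTVAR_US) and rounded
 * down to msec (minimum 1) at the legacy index for older userspace.
 */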

static int tcp_metrics_dump_info(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 struct tcp_metrics_block *tm)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &tcp_metrics_nl_family, NLM_F_MULTI,
			  TCP_METRICS_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (tcp_metrics_fill_info(skb, tm) < 0)
		goto nla_put_failure;

	genlmsg_end(skb, hdr);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

static int tcp_metrics_nl_dump(struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	unsigned int row, s_row = cb->args[0];
	int s_col = cb->args[1], col = s_col;

	for (row = s_row; row < max_rows; row++, s_col = 0) {
		struct tcp_metrics_block *tm;
		struct tcpm_hash_bucket *hb = tcp_metrics_hash + row;

		rcu_read_lock();
		for (col = 0, tm = rcu_dereference(hb->chain); tm;
		     tm = rcu_dereference(tm->tcpm_next), col++) {
			if (!net_eq(tm_net(tm), net))
				continue;
			if (col < s_col)
				continue;
			if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
				rcu_read_unlock();
				goto done;
			}
		}
		rcu_read_unlock();
	}

done:
	cb->args[0] = row;
	cb->args[1] = col;
	return skb->len;
}

static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			   unsigned int *hash, int optional, int v4, int v6)
{
	struct nlattr *a;

	a = info->attrs[v4];
	if (a) {
		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
		if (hash)
			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
		return 0;
	}
	a = info->attrs[v6];
	if (a) {
		struct in6_addr in6;

		if (nla_len(a) != sizeof(struct in6_addr))
			return -EINVAL;
		in6 = nla_get_in6_addr(a);
		inetpeer_set_addr_v6(addr, &in6);
		if (hash)
			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
		return 0;
	}
	return optional ? 1 : -EAFNOSUPPORT;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
			 unsigned int *hash, int optional)
{
	return __parse_nl_addr(info, addr, hash, optional,
			       TCP_METRICS_ATTR_ADDR_IPV4,
			       TCP_METRICS_ATTR_ADDR_IPV6);
}

static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr)
{
	return __parse_nl_addr(info, addr, NULL, 0,
			       TCP_METRICS_ATTR_SADDR_IPV4,
			       TCP_METRICS_ATTR_SADDR_IPV6);
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct tcp_metrics_block *tm;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct sk_buff *msg;
	struct net *net = genl_info_net(info);
	void *reply;
	int ret;
	bool src = true;

	ret = parse_nl_addr(info, &daddr, &hash, 0);
	if (ret < 0)
		return ret;

	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
				  info->genlhdr->cmd);
	if (!reply)
		goto nla_put_failure;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	ret = -ESRCH;
	rcu_read_lock();
	for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm;
	     tm = rcu_dereference(tm->tcpm_next)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			ret = tcp_metrics_fill_info(msg, tm);
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out_free;

	genlmsg_end(msg, reply);
	return genlmsg_reply(msg, info);

nla_put_failure:
	ret = -EMSGSIZE;

out_free:
	nlmsg_free(msg);
	return ret;
}

static void tcp_metrics_flush_all(struct net *net)
{
	unsigned int max_rows = 1U << tcp_metrics_hash_log;
	struct tcpm_hash_bucket *hb = tcp_metrics_hash;
	struct tcp_metrics_block *tm;
	unsigned int row;

	for (row = 0; row < max_rows; row++, hb++) {
		struct tcp_metrics_block __rcu **pp;
		bool match;

		spin_lock_bh(&tcp_metrics_lock);
		pp = &hb->chain;
		for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
			match = net ? net_eq(tm_net(tm), net) :
				!refcount_read(&tm_net(tm)->ns.count);
			if (match) {
				*pp = tm->tcpm_next;
				kfree_rcu(tm, rcu_head);
			} else {
				pp = &tm->tcpm_next;
			}
		}
		spin_unlock_bh(&tcp_metrics_lock);
	}
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct tcpm_hash_bucket *hb;
	struct tcp_metrics_block *tm;
	struct tcp_metrics_block __rcu **pp;
	struct inetpeer_addr saddr, daddr;
	unsigned int hash;
	struct net *net = genl_info_net(info);
	int ret;
	bool src = true, found = false;

	ret = parse_nl_addr(info, &daddr, &hash, 1);
	if (ret < 0)
		return ret;
	if (ret > 0) {
		tcp_metrics_flush_all(net);
		return 0;
	}
	ret = parse_nl_saddr(info, &saddr);
	if (ret < 0)
		src = false;

	hash ^= net_hash_mix(net);
	hash = hash_32(hash, tcp_metrics_hash_log);
	hb = tcp_metrics_hash + hash;
	pp = &hb->chain;
	spin_lock_bh(&tcp_metrics_lock);
	for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
		if (addr_same(&tm->tcpm_daddr, &daddr) &&
		    (!src || addr_same(&tm->tcpm_saddr, &saddr)) &&
		    net_eq(tm_net(tm), net)) {
			*pp = tm->tcpm_next;
			kfree_rcu(tm, rcu_head);
			found = true;
		} else {
			pp = &tm->tcpm_next;
		}
	}
	spin_unlock_bh(&tcp_metrics_lock);
	if (!found)
		return -ESRCH;
	return 0;
}

static const struct genl_small_ops tcp_metrics_nl_ops[] = {
	{
		.cmd = TCP_METRICS_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_get,
		.dumpit = tcp_metrics_nl_dump,
	},
	{
		.cmd = TCP_METRICS_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = tcp_metrics_nl_cmd_del,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_family tcp_metrics_nl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= TCP_METRICS_GENL_NAME,
	.version	= TCP_METRICS_GENL_VERSION,
	.maxattr	= TCP_METRICS_ATTR_MAX,
	.policy		= tcp_metrics_nl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.small_ops	= tcp_metrics_nl_ops,
	.n_small_ops	= ARRAY_SIZE(tcp_metrics_nl_ops),
};
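
/* Userspace reaches this family over generic netlink; iproute2's
 * "ip tcp_metrics" (show/delete/flush) is the usual front end for the
 * GET and DEL commands registered above.
 */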

static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtouint(str, 0, &tcpmhash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
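
/* The hash size can be pinned on the kernel command line, e.g.
 * "tcpmhash_entries=16384"; tcp_net_metrics_init() below rounds the
 * value up to a power of two via order_base_2().
 */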

static int __net_init tcp_net_metrics_init(struct net *net)
{
	size_t size;
	unsigned int slots;

	if (!net_eq(net, &init_net))
		return 0;

	slots = tcpmhash_entries;
	if (!slots) {
		if (totalram_pages() >= 128 * 1024)
			slots = 16 * 1024;
		else
			slots = 8 * 1024;
	}

	tcp_metrics_hash_log = order_base_2(slots);
	size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log;

	tcp_metrics_hash = kvzalloc(size, GFP_KERNEL);
	if (!tcp_metrics_hash)
		return -ENOMEM;

	return 0;
}
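
/* Example sizing on 64-bit: with at least 128K pages of RAM (512 MiB
 * at 4 KiB pages) the default is 16K slots, so tcp_metrics_hash_log =
 * 14 and size = 8 << 14 = 128 KiB for the bucket array, since each
 * tcpm_hash_bucket is a single RCU-protected pointer.
 */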

static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list)
{
	tcp_metrics_flush_all(NULL);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
	.init		=	tcp_net_metrics_init,
	.exit_batch	=	tcp_net_metrics_exit_batch,
};

void __init tcp_metrics_init(void)
{
	int ret;

	ret = register_pernet_subsys(&tcp_net_metrics_ops);
	if (ret < 0)
		panic("Could not allocate the tcp_metrics hash table\n");

	ret = genl_register_family(&tcp_metrics_nl_family);
	if (ret < 0)
		panic("Could not register tcp_metrics generic netlink\n");
}