cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tcp_recovery.c (7708B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* If reordering has not been observed, be aggressive during
		 * recovery or when starting recovery via the DUPACK threshold.
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay.
	 * Use min_rtt instead of the smoothed RTT because reordering is
	 * often a path property and less related to queuing or delayed ACKs.
	 * Upon receiving DSACKs, linearly increase the window up to the
	 * smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}
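
/* Worked example (illustrative numbers, not part of the original source):
 * with tcp_min_rtt(tp) = 40000 us and reo_wnd_steps = 1, the settling
 * delay is (40000 >> 2) * 1 = 10000 us, i.e. min_rtt/4. Since srtt_us
 * stores 8 * smoothed RTT, a smoothed RTT of 8000 us (srtt_us = 64000)
 * would cap the result at 8000 us via the min().
 */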

s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
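
/* Worked example (illustrative numbers, not part of the original source):
 * if rack.rtt_us = 25000 us, reo_wnd = 10000 us and the skb was (re)sent
 * 30000 us ago, the result is 25000 + 10000 - 30000 = 5000 us: the packet
 * is not yet considered lost and may wait up to 5 ms more for a (s)ack.
 */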

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_skb_sent_after(tp->rack.mstamp,
					tcp_skb_timestamp_us(skb),
					tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}
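
/* Illustrative scenario (not part of the original source): segments P1
 * and P2 are sent back to back and only P2 is SACKed. Because P2 was
 * sent after P1, P1 becomes a loss candidate; it is actually marked lost
 * only once tcp_rack_skb_timeout() reports that rack.rtt_us + reo_wnd
 * has elapsed since P1 was sent. Otherwise the remaining wait time is
 * returned via *reo_timeout so the caller can arm the reordering timer.
 */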

bool tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return false;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
	return !!timeout;
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
			       end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}
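
/* Illustrative scenario (not part of the original source): suppose
 * tcp_min_rtt(tp) = 50000 us and a SACK arrives for a segment that was
 * retransmitted only 10000 us ago. rtt_us = 10000 < min_rtt, so the SACK
 * was most likely triggered by the original transmission rather than the
 * retransmission; the measurement is ambiguous and RACK state is left
 * unchanged. A SACK arriving 60000 us after the retransmission would
 * update rack.rtt_us and, if this segment is the most recently sent,
 * rack.mstamp/rack.end_seq as well.
 */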

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;
	u32 lost = tp->lost;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}
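
/* Illustrative scenario (not part of the original source): the reordering
 * timer armed by tcp_rack_mark_lost() fires, and tcp_rack_detect_loss()
 * now finds remaining <= 0 for a still-unacked packet and marks it lost.
 * That raises lost_out and therefore lowers tcp_packets_in_flight(), so
 * the inflight comparison above detects the change, recovery is entered
 * (if not already active) and the packet is retransmitted; otherwise the
 * handler simply re-arms the RTO.
 */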

/* Updates the RACK's reo_wnd based on DSACK and no. of recoveries.
 *
 * If a DSACK is received that seems like it may have been due to reordering
 * triggering fast recovery, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that the spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * no. of successful recoveries (accounts for full DSACK-based loss
 * recovery undo). After that, reset it to default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to is (approximately) due to a spurious retransmission sent
 * after reo_wnd was last updated.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than an
 * absolute value, to account for changes in the RTT.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}
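
/* Worked example (illustrative, not part of the original source): with
 * reo_wnd_steps = 1 the reordering window is min_rtt/4. A DSACK seen at
 * least one RTT after the last adjustment bumps reo_wnd_steps to 2, so
 * tcp_rack_reo_wnd() now allows min_rtt/2 (still capped by the smoothed
 * RTT), and reo_wnd_persist is set to TCP_RACK_RECOVERY_THRESH (16).
 * After 16 recoveries with no further DSACKs, the steps fall back to 1.
 */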

/* RFC6582 NewReno recovery for non-SACK connections. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start the fast recovery
 * b) an ACK acknowledging new data during the fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
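
/* Illustrative scenario (not part of the original source): on a non-SACK
 * connection with tp->reordering = 3, the third DUPACK makes the emulated
 * sacked_out reach the threshold while the state is still below
 * TCP_CA_Recovery, so the head of the retransmit queue is marked lost.
 * If that skb spans several MSS it is first split so only the leading
 * MSS-sized chunk is marked, matching NewReno's one-segment-at-a-time
 * retransmission.
 */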