cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

call_event.c (12368B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
      3 *
      4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
      5 * Written by David Howells (dhowells@redhat.com)
      6 */
      7
      8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      9
     10#include <linux/module.h>
     11#include <linux/circ_buf.h>
     12#include <linux/net.h>
     13#include <linux/skbuff.h>
     14#include <linux/slab.h>
     15#include <linux/udp.h>
     16#include <net/sock.h>
     17#include <net/af_rxrpc.h>
     18#include "ar-internal.h"
     19
     20/*
     21 * Propose a PING ACK be sent.
     22 */
     23static void rxrpc_propose_ping(struct rxrpc_call *call,
     24			       bool immediate, bool background)
     25{
     26	if (immediate) {
     27		if (background &&
     28		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
     29			rxrpc_queue_call(call);
     30	} else {
     31		unsigned long now = jiffies;
     32		unsigned long ping_at = now + rxrpc_idle_ack_delay;
     33
     34		if (time_before(ping_at, call->ping_at)) {
     35			WRITE_ONCE(call->ping_at, ping_at);
     36			rxrpc_reduce_call_timer(call, ping_at, now,
     37						rxrpc_timer_set_for_ping);
     38		}
     39	}
     40}
     41
/*
 * Propose an ACK of type @ack_reason covering @serial be sent.
 *
 * Caller must hold call->lock (see rxrpc_propose_ACK()).  Ping ACKs are
 * delegated to rxrpc_propose_ping().  Otherwise the currently pending ACK
 * is updated, displaced or left to subsume this proposal according to
 * rxrpc_ack_priority[], and the ACK timer may be shortened.  The outcome
 * is recorded via trace_rxrpc_propose_ack().
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u32 serial, bool immediate, bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned long expiry = rxrpc_soft_ack_delay;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Pings are handled specially because we don't want to accidentally
	 * lose a ping response by subsuming it into a ping.
	 */
	if (ack_reason == RXRPC_ACK_PING) {
		rxrpc_propose_ping(call, immediate, background);
		goto trace;
	}

	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but we don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
		}
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		/* Higher-priority reason displaces the pending ACK. */
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
	} else {
		/* Lower priority: the already-pending ACK subsumes this one. */
		outcome = rxrpc_propose_ack_subsume;
	}

	/* Work out how soon the ACK needs to go out for this reason. */
	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		/* Any other reason is sent straight away. */
		immediate = true;
		break;
	}

	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		/* Immediate ACK: set the event and, for background proposals,
		 * queue the call worker if the event wasn't already pending.
		 */
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies, ack_at;

		/* Base the delay on the smoothed RTT if known (srtt_us is
		 * shifted right by 3 here, so it presumably is stored scaled
		 * by 8 -- confirm against the peer RTT code), otherwise on
		 * the per-reason expiry; then add any Tx backoff.
		 */
		if (call->peer->srtt_us != 0)
			ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
		else
			ack_at = expiry;

		ack_at += READ_ONCE(call->tx_backoff);
		ack_at += now;
		if (time_before(ack_at, call->ack_at)) {
			WRITE_ONCE(call->ack_at, ack_at);
			rxrpc_reduce_call_timer(call, ack_at, now,
						rxrpc_timer_set_for_ack);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
				background, outcome);
}
    132
/*
 * Propose an ACK be sent, locking the call structure.
 *
 * Locked wrapper around __rxrpc_propose_ACK(): takes call->lock with BHs
 * disabled around the proposal.
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u32 serial, bool immediate, bool background,
		       enum rxrpc_propose_ack_trace why)
{
	spin_lock_bh(&call->lock);
	__rxrpc_propose_ACK(call, ack_reason, serial,
			    immediate, background, why);
	spin_unlock_bh(&call->lock);
}
    145
/*
 * Handle congestion being detected by the retransmit timeout.
 *
 * Only records the fact by setting RXRPC_CALL_RETRANS_TIMEOUT on the call;
 * the flag is consumed by congestion-control code outside this file.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}
    153
    154/*
    155 * Perform retransmission of NAK'd and unack'd packets.
    156 */
    157static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
    158{
    159	struct sk_buff *skb;
    160	unsigned long resend_at;
    161	rxrpc_seq_t cursor, seq, top;
    162	ktime_t now, max_age, oldest, ack_ts;
    163	int ix;
    164	u8 annotation, anno_type, retrans = 0, unacked = 0;
    165
    166	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
    167
    168	now = ktime_get_real();
    169	max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
    170
    171	spin_lock_bh(&call->lock);
    172
    173	cursor = call->tx_hard_ack;
    174	top = call->tx_top;
    175	ASSERT(before_eq(cursor, top));
    176	if (cursor == top)
    177		goto out_unlock;
    178
    179	/* Scan the packet list without dropping the lock and decide which of
    180	 * the packets in the Tx buffer we're going to resend and what the new
    181	 * resend timeout will be.
    182	 */
    183	trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
    184	oldest = now;
    185	for (seq = cursor + 1; before_eq(seq, top); seq++) {
    186		ix = seq & RXRPC_RXTX_BUFF_MASK;
    187		annotation = call->rxtx_annotations[ix];
    188		anno_type = annotation & RXRPC_TX_ANNO_MASK;
    189		annotation &= ~RXRPC_TX_ANNO_MASK;
    190		if (anno_type == RXRPC_TX_ANNO_ACK)
    191			continue;
    192
    193		skb = call->rxtx_buffer[ix];
    194		rxrpc_see_skb(skb, rxrpc_skb_seen);
    195
    196		if (anno_type == RXRPC_TX_ANNO_UNACK) {
    197			if (ktime_after(skb->tstamp, max_age)) {
    198				if (ktime_before(skb->tstamp, oldest))
    199					oldest = skb->tstamp;
    200				continue;
    201			}
    202			if (!(annotation & RXRPC_TX_ANNO_RESENT))
    203				unacked++;
    204		}
    205
    206		/* Okay, we need to retransmit a packet. */
    207		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
    208		retrans++;
    209		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
    210				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
    211	}
    212
    213	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
    214	resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
    215	WRITE_ONCE(call->resend_at, resend_at);
    216
    217	if (unacked)
    218		rxrpc_congestion_timeout(call);
    219
    220	/* If there was nothing that needed retransmission then it's likely
    221	 * that an ACK got lost somewhere.  Send a ping to find out instead of
    222	 * retransmitting data.
    223	 */
    224	if (!retrans) {
    225		rxrpc_reduce_call_timer(call, resend_at, now_j,
    226					rxrpc_timer_set_for_resend);
    227		spin_unlock_bh(&call->lock);
    228		ack_ts = ktime_sub(now, call->acks_latest_ts);
    229		if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
    230			goto out;
    231		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
    232				  rxrpc_propose_ack_ping_for_lost_ack);
    233		rxrpc_send_ack_packet(call, true, NULL);
    234		goto out;
    235	}
    236
    237	/* Now go through the Tx window and perform the retransmissions.  We
    238	 * have to drop the lock for each send.  If an ACK comes in whilst the
    239	 * lock is dropped, it may clear some of the retransmission markers for
    240	 * packets that it soft-ACKs.
    241	 */
    242	for (seq = cursor + 1; before_eq(seq, top); seq++) {
    243		ix = seq & RXRPC_RXTX_BUFF_MASK;
    244		annotation = call->rxtx_annotations[ix];
    245		anno_type = annotation & RXRPC_TX_ANNO_MASK;
    246		if (anno_type != RXRPC_TX_ANNO_RETRANS)
    247			continue;
    248
    249		/* We need to reset the retransmission state, but we need to do
    250		 * so before we drop the lock as a new ACK/NAK may come in and
    251		 * confuse things
    252		 */
    253		annotation &= ~RXRPC_TX_ANNO_MASK;
    254		annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
    255		call->rxtx_annotations[ix] = annotation;
    256
    257		skb = call->rxtx_buffer[ix];
    258		if (!skb)
    259			continue;
    260
    261		rxrpc_get_skb(skb, rxrpc_skb_got);
    262		spin_unlock_bh(&call->lock);
    263
    264		if (rxrpc_send_data_packet(call, skb, true) < 0) {
    265			rxrpc_free_skb(skb, rxrpc_skb_freed);
    266			return;
    267		}
    268
    269		if (rxrpc_is_client_call(call))
    270			rxrpc_expose_client_call(call);
    271
    272		rxrpc_free_skb(skb, rxrpc_skb_freed);
    273		spin_lock_bh(&call->lock);
    274		if (after(call->tx_hard_ack, seq))
    275			seq = call->tx_hard_ack;
    276	}
    277
    278out_unlock:
    279	spin_unlock_bh(&call->lock);
    280out:
    281	_leave("");
    282}
    283
/*
 * Handle retransmission and deferred ACK/abort generation.
 *
 * Call-event work item (queued as call->processor).  Checks each of the
 * call's deadline fields, converting any that have expired into event bits,
 * then services the pending events: abort, expiry, lost-ACK ping, ACK,
 * ping and resend.  After each action it loops via recheck_state, but
 * requeues itself after 5 iterations so one call cannot monopolise the
 * worker.  Finally the call timer is rearmed for the nearest deadline.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	rxrpc_serial_t *send_ack;
	unsigned long now, next, t;
	unsigned int iterations = 0;

	rxrpc_see_call(call);

	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	/* Limit the number of times we do this before returning to the manager */
	iterations++;
	if (iterations > 5)
		goto requeue;

	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	if (call->state == RXRPC_CALL_COMPLETE) {
		rxrpc_delete_call_timer(call);
		goto out_put;
	}

	/* Work out if any timeouts tripped */
	now = jiffies;
	t = READ_ONCE(call->expect_rx_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	/* The request-receive deadline only applies whilst the server is
	 * still receiving the request phase of the call.
	 */
	t = READ_ONCE(call->expect_req_by);
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
	    time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_term_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	/* For each soft timeout below, cmpxchg() pushes the deadline far into
	 * the future (now + MAX_JIFFY_OFFSET) only if it still holds the
	 * value we sampled, so a concurrent re-arm is not overwritten.
	 */
	t = READ_ONCE(call->ack_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK, &call->events);
	}

	t = READ_ONCE(call->ack_lost_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
	}

	t = READ_ONCE(call->keepalive_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
				  rxrpc_propose_ack_ping_for_keepalive);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->ping_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->resend_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	/* Process events */
	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		/* If the peer was heard from more recently than our last Rx
		 * serial, the connection appears to have been reset rather
		 * than merely timed out.
		 */
		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
			trace_rxrpc_call_reset(call);
			rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
		} else {
			rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
		}
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

	send_ack = NULL;
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
		/* An ACK seems to have got lost: ping the peer.  The ping's
		 * serial is presumably stored into acks_lost_ping by the ACK
		 * transmitter -- confirm in rxrpc_send_ack_packet().
		 */
		call->acks_lost_top = call->tx_top;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		send_ack = &call->acks_lost_ping;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
	    send_ack) {
		if (call->ackr_reason) {
			rxrpc_send_ack_packet(call, false, send_ack);
			goto recheck_state;
		}
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
		rxrpc_send_ack_packet(call, true, NULL);
		goto recheck_state;
	}

	/* No point resending whilst the client is only receiving the reply. */
	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) &&
	    call->state != RXRPC_CALL_CLIENT_RECV_REPLY) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	/* Make sure the timer is restarted */
	next = call->expect_rx_by;

#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }

	set(call->expect_req_by);
	set(call->expect_term_by);
	set(call->ack_at);
	set(call->ack_lost_at);
	set(call->resend_at);
	set(call->keepalive_at);
	set(call->ping_at);

	now = jiffies;
	if (time_after_eq(now, next))
		goto recheck_state;

	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE)
		goto requeue;

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
	return;

requeue:
	__rxrpc_queue_call(call);
	goto out;
}