cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

minisocks.c (7552B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  net/dccp/minisocks.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/timer.h>

#include <net/sock.h>
#include <net/xfrm.h>
#include <net/inet_timewait_sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"

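/*
 * Bookkeeping for DCCP sockets in TIME_WAIT, reusing the generic inet
 * timewait infrastructure; sysctl_max_tw_buckets bounds how many timewait
 * buckets may exist at once.
 */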
struct inet_timewait_death_row dccp_death_row = {
        .tw_refcount = REFCOUNT_INIT(1),
        .sysctl_max_tw_buckets = NR_FILE * 2,
        .hashinfo = &dccp_hashinfo,
};

EXPORT_SYMBOL_GPL(dccp_death_row);

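/*
 * Move a closing socket into TIME_WAIT: allocate a timewait bucket, copy
 * the addressing information over, arm the timewait timer and hash the
 * bucket into place of the original socket, which is then released via
 * dccp_done().
 */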
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
        struct inet_timewait_sock *tw;

        tw = inet_twsk_alloc(sk, &dccp_death_row, state);

        if (tw != NULL) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (state == DCCP_TIME_WAIT)
                        timeo = DCCP_TIMEWAIT_LEN;

                /* tw_timer is pinned, so we need to make sure BHs are
                 * disabled in the following section, otherwise the timer
                 * handler could run before we complete the initialization.
                 */
                local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates.
                 * Note that access to tw after this point is illegal.
                 */
                inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
                local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                DCCP_WARN("time wait bucket table overflow\n");
        }

        dccp_done(sk);
}

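/*
 * Clone the listening socket into a child for an accepted connection
 * request: copy the service code, timestamps and initial sequence numbers
 * negotiated in the request_sock, then activate the agreed feature values
 * (CCIDs, sequence windows) before the child goes live.
 */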
struct sock *dccp_create_openreq_child(const struct sock *sk,
                                       const struct request_sock *req,
                                       const struct sk_buff *skb)
{
        /*
         * Step 3: Process LISTEN state
         *
         *   (* Generate a new socket and switch to that socket *)
         *   Set S := new socket for this port pair
         */
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

        if (newsk != NULL) {
                struct dccp_request_sock *dreq = dccp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct dccp_sock *newdp = dccp_sk(newsk);

                newdp->dccps_role           = DCCP_ROLE_SERVER;
                newdp->dccps_hc_rx_ackvec   = NULL;
                newdp->dccps_service_list   = NULL;
                newdp->dccps_hc_rx_ccid     = NULL;
                newdp->dccps_hc_tx_ccid     = NULL;
                newdp->dccps_service        = dreq->dreq_service;
                newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
                newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
                newicsk->icsk_rto           = DCCP_TIMEOUT_INIT;

                INIT_LIST_HEAD(&newdp->dccps_featneg);
                /*
                 * Step 3: Process LISTEN state
                 *
                 *    Choose S.ISS (initial seqno) or set from Init Cookies
                 *    Initialize S.GAR := S.ISS
                 *    Set S.ISR, S.GSR from packet (or Init Cookies)
                 *
                 *    Setting AWL/AWH and SWL/SWH happens as part of the feature
                 *    activation below, as these windows all depend on the local
                 *    and remote Sequence Window feature values (7.5.2).
                 */
                newdp->dccps_iss = dreq->dreq_iss;
                newdp->dccps_gss = dreq->dreq_gss;
                newdp->dccps_gar = newdp->dccps_iss;
                newdp->dccps_isr = dreq->dreq_isr;
                newdp->dccps_gsr = dreq->dreq_gsr;

                /*
                 * Activate features: initialise CCIDs, sequence windows etc.
                 */
                if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {
                        sk_free_unlock_clone(newsk);
                        return NULL;
                }
                dccp_init_xmit_timers(newsk);

                __DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS);
        }
        return newsk;
}

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);

/*
 * Process an incoming packet for RESPOND sockets represented
 * as a request_sock.
 */
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
                            struct request_sock *req)
{
        struct sock *child = NULL;
        struct dccp_request_sock *dreq = dccp_rsk(req);
        bool own_req;

        /* TCP/DCCP listeners became lockless.
         * DCCP stores complex state in its request_sock, so we need to
         * protect it, since this code now runs without being protected
         * by the parent (listener) lock.
         */
        spin_lock_bh(&dreq->dreq_lock);

        /* Check for retransmitted REQUEST */
        if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {

                if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
                        dccp_pr_debug("Retransmitted REQUEST\n");
                        dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
                        /*
                         * Send another RESPONSE packet.
                         * To protect against Request floods, increment the
                         * retransmit counter (backoff, monitored by
                         * dccp_response_timer).
                         */
                        inet_rtx_syn_ack(sk, req);
                }
                /* Network Duplicate, discard packet */
                goto out;
        }

        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
            dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
                goto drop;

        /* Invalid ACK */
        if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
                                dreq->dreq_iss, dreq->dreq_gss)) {
                dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
                              "dreq_iss=%llu, dreq_gss=%llu\n",
                              (unsigned long long)
                              DCCP_SKB_CB(skb)->dccpd_ack_seq,
                              (unsigned long long) dreq->dreq_iss,
                              (unsigned long long) dreq->dreq_gss);
                goto drop;
        }

        if (dccp_parse_options(sk, dreq, skb))
                goto drop;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         req, &own_req);
        if (child) {
                child = inet_csk_complete_hashdance(sk, child, req, own_req);
                goto out;
        }

        DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
        if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
                req->rsk_ops->send_reset(sk, skb);

        inet_csk_reqsk_queue_drop(sk, req);
out:
        spin_unlock_bh(&dreq->dreq_lock);
        return child;
}

EXPORT_SYMBOL_GPL(dccp_check_req);

/*
 * Queue the segment on the new socket's backlog if that socket is
 * currently owned by the user, otherwise short-circuit and process
 * the segment on the new socket right away.
 */
int dccp_child_process(struct sock *parent, struct sock *child,
                       struct sk_buff *skb)
        __releases(child)
{
        int ret = 0;
        const int state = child->sk_state;

        if (!sock_owned_by_user(child)) {
                ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
                                             skb->len);

                /* Wakeup parent, send SIGIO */
                if (state == DCCP_RESPOND && child->sk_state != state)
                        parent->sk_data_ready(parent);
        } else {
                /* Alas, it is possible again, because we do the lookup
                 * in the main socket hash table and the lock on the
                 * listening socket does not protect us any more.
                 */
                __sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}

EXPORT_SYMBOL_GPL(dccp_child_process);

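/*
 * DCCP never sends DCCP-ACK packets from the LISTEN or RESPOND states, so
 * this request_sock_ops callback should never be reached; it only reports
 * a bug if it is.
 */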
void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                         struct request_sock *rsk)
{
        DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
}

EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);

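/*
 * Initialise the DCCP-specific part of a request_sock from the incoming
 * REQUEST packet and inherit the feature-negotiation options of the
 * listening socket.
 */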
int dccp_reqsk_init(struct request_sock *req,
                    struct dccp_sock const *dp, struct sk_buff const *skb)
{
        struct dccp_request_sock *dreq = dccp_rsk(req);

        spin_lock_init(&dreq->dreq_lock);
        inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
        inet_rsk(req)->ir_num      = ntohs(dccp_hdr(skb)->dccph_dport);
        inet_rsk(req)->acked       = 0;
        dreq->dreq_timestamp_echo  = 0;

        /* inherit feature negotiation options from listening socket */
        return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
}

EXPORT_SYMBOL_GPL(dccp_reqsk_init);