cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

output.c (24455B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/* SCTP kernel implementation
      3 * (C) Copyright IBM Corp. 2001, 2004
      4 * Copyright (c) 1999-2000 Cisco, Inc.
      5 * Copyright (c) 1999-2001 Motorola, Inc.
      6 *
      7 * This file is part of the SCTP kernel implementation
      8 *
      9 * These functions handle output processing.
     10 *
     11 * Please send any bug reports or fixes you make to the
     12 * email address(es):
     13 *    lksctp developers <linux-sctp@vger.kernel.org>
     14 *
     15 * Written or modified by:
     16 *    La Monte H.P. Yarroll <piggy@acm.org>
     17 *    Karl Knutson          <karl@athena.chicago.il.us>
     18 *    Jon Grimm             <jgrimm@austin.ibm.com>
     19 *    Sridhar Samudrala     <sri@us.ibm.com>
     20 */
     21
     22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     23
     24#include <linux/types.h>
     25#include <linux/kernel.h>
     26#include <linux/wait.h>
     27#include <linux/time.h>
     28#include <linux/ip.h>
     29#include <linux/ipv6.h>
     30#include <linux/init.h>
     31#include <linux/slab.h>
     32#include <net/inet_ecn.h>
     33#include <net/ip.h>
     34#include <net/icmp.h>
     35#include <net/net_namespace.h>
     36
     37#include <linux/socket.h> /* for sa_family_t */
     38#include <net/sock.h>
     39
     40#include <net/sctp/sctp.h>
     41#include <net/sctp/sm.h>
     42#include <net/sctp/checksum.h>
     43
     44/* Forward declarations for private helpers. */
     45static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
     46						 struct sctp_chunk *chunk);
     47static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
     48						  struct sctp_chunk *chunk);
     49static void sctp_packet_append_data(struct sctp_packet *packet,
     50				    struct sctp_chunk *chunk);
     51static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
     52					   struct sctp_chunk *chunk,
     53					   u16 chunk_len);
     54
     55static void sctp_packet_reset(struct sctp_packet *packet)
     56{
     57	/* sctp_packet_transmit() relies on this to reset size to the
     58	 * current overhead after sending packets.
     59	 */
     60	packet->size = packet->overhead;
     61
     62	packet->has_cookie_echo = 0;
     63	packet->has_sack = 0;
     64	packet->has_data = 0;
     65	packet->has_auth = 0;
     66	packet->ipfragok = 0;
     67	packet->auth = NULL;
     68}
     69
     70/* Configure a packet.
     71 * This is a follow-up set of initializations on top of sctp_packet_init().
     72 */
     73void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
     74			int ecn_capable)
     75{
     76	struct sctp_transport *tp = packet->transport;
     77	struct sctp_association *asoc = tp->asoc;
     78	struct sctp_sock *sp = NULL;
     79	struct sock *sk;
     80
     81	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
     82	packet->vtag = vtag;
     83
     84	/* do the following jobs only once per flush */
     85	if (!sctp_packet_empty(packet))
     86		return;
     87
     88	/* set packet max_size with pathmtu, then calculate overhead */
     89	packet->max_size = tp->pathmtu;
     90
     91	if (asoc) {
     92		sk = asoc->base.sk;
     93		sp = sctp_sk(sk);
     94	}
     95	packet->overhead = sctp_mtu_payload(sp, 0, 0);
     96	packet->size = packet->overhead;
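       	/* For illustration, assuming a plain IPv4 association with no UDP
       	 * encapsulation: this overhead is sizeof(struct iphdr) +
       	 * sizeof(struct sctphdr) = 20 + 12 = 32 bytes, leaving 1468 bytes
       	 * of chunk space on a 1500-byte path MTU.
       	 */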
     97
     98	if (!asoc)
     99		return;
    100
    101	/* update dst or transport pathmtu if needed */
    102	if (!sctp_transport_dst_check(tp)) {
    103		sctp_transport_route(tp, NULL, sp);
    104		if (asoc->param_flags & SPP_PMTUD_ENABLE)
    105			sctp_assoc_sync_pmtu(asoc);
    106	} else if (!sctp_transport_pl_enabled(tp) &&
    107		   asoc->param_flags & SPP_PMTUD_ENABLE) {
    108		if (!sctp_transport_pmtu_check(tp))
    109			sctp_assoc_sync_pmtu(asoc);
    110	}
    111
    112	if (asoc->pmtu_pending) {
    113		if (asoc->param_flags & SPP_PMTUD_ENABLE)
    114			sctp_assoc_sync_pmtu(asoc);
    115		asoc->pmtu_pending = 0;
    116	}
    117
    118	/* If there is a prepend chunk, stick it on the list before
    119	 * any other chunks get appended.
    120	 */
    121	if (ecn_capable) {
    122		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
    123
    124		if (chunk)
    125			sctp_packet_append_chunk(packet, chunk);
    126	}
    127
    128	if (!tp->dst)
    129		return;
    130
    131	/* set packet max_size with gso_max_size if gso is enabled */
    132	rcu_read_lock();
    133	if (__sk_dst_get(sk) != tp->dst) {
    134		dst_hold(tp->dst);
    135		sk_setup_caps(sk, tp->dst);
    136	}
    137	packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size),
    138						GSO_LEGACY_MAX_SIZE)
    139					  : asoc->pathmtu;
    140	rcu_read_unlock();
    141}
    142
    143/* Initialize the packet structure. */
    144void sctp_packet_init(struct sctp_packet *packet,
    145		      struct sctp_transport *transport,
    146		      __u16 sport, __u16 dport)
    147{
    148	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
    149
    150	packet->transport = transport;
    151	packet->source_port = sport;
    152	packet->destination_port = dport;
    153	INIT_LIST_HEAD(&packet->chunk_list);
    154	/* The overhead will be calculated by sctp_packet_config() */
    155	packet->overhead = 0;
    156	sctp_packet_reset(packet);
    157	packet->vtag = 0;
    158}
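
       /* A rough sketch (illustrative, not a verbatim call trace) of how these
        * entry points fit together for the output queue:
        *
        *	sctp_packet_init(pkt, transport, sport, dport);	// once per transport
        *	sctp_packet_config(pkt, vtag, ecn_capable);	// at the start of a flush
        *	sctp_packet_transmit_chunk(pkt, chunk, 0, gfp);	// bundle/append chunks
        *	sctp_packet_transmit(pkt, gfp);			// push the rest to the wire
        */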
    159
    160/* Free a packet.  */
    161void sctp_packet_free(struct sctp_packet *packet)
    162{
    163	struct sctp_chunk *chunk, *tmp;
    164
    165	pr_debug("%s: packet:%p\n", __func__, packet);
    166
    167	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
    168		list_del_init(&chunk->list);
    169		sctp_chunk_free(chunk);
    170	}
    171}
    172
    173/* This routine tries to append the chunk to the offered packet. If adding
    174 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
    175 * is not present in the packet, it transmits the input packet.
    176 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
    177 * as it can fit in the packet, but any more data that does not fit in this
    178 * packet can be sent only after receiving the COOKIE_ACK.
    179 */
    180enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
    181					  struct sctp_chunk *chunk,
    182					  int one_packet, gfp_t gfp)
    183{
    184	enum sctp_xmit retval;
    185
    186	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
    187		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
    188
    189	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
    190	case SCTP_XMIT_PMTU_FULL:
    191		if (!packet->has_cookie_echo) {
    192			int error = 0;
    193
    194			error = sctp_packet_transmit(packet, gfp);
    195			if (error < 0)
    196				chunk->skb->sk->sk_err = -error;
    197
    198			/* If we have an empty packet, then we can NOT ever
    199			 * return PMTU_FULL.
    200			 */
    201			if (!one_packet)
    202				retval = sctp_packet_append_chunk(packet,
    203								  chunk);
    204		}
    205		break;
    206
    207	case SCTP_XMIT_RWND_FULL:
    208	case SCTP_XMIT_OK:
    209	case SCTP_XMIT_DELAY:
    210		break;
    211	}
    212
    213	return retval;
    214}
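
       /* Note on the PMTU_FULL case above: when a packet without a pending
        * COOKIE_ECHO overflows, it is flushed first and, unless the caller asked
        * for a single packet via one_packet, the chunk is appended again to the
        * now-empty packet so the caller does not have to retry it.
        */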
    215
    216/* Try to bundle a pad chunk into a packet with a heartbeat chunk for a PLPMTUD probe */
    217static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk)
    218{
    219	struct sctp_transport *t = pkt->transport;
    220	struct sctp_chunk *pad;
    221	int overhead = 0;
    222
    223	if (!chunk->pmtu_probe)
    224		return SCTP_XMIT_OK;
    225
    226	/* calculate the Padding Data size for the pad chunk */
    227	overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
    228	overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk);
    229	pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead);
    230	if (!pad)
    231		return SCTP_XMIT_DELAY;
    232
    233	list_add_tail(&pad->list, &pkt->chunk_list);
    234	pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length));
    235	chunk->transport = t;
    236
    237	return SCTP_XMIT_OK;
    238}
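
       /* Background for the PAD bundling above: a PLPMTUD probe only proves
        * anything if the probe packet really is as large as the size being
        * probed, so the HEARTBEAT is padded out toward t->pl.probe_size instead
        * of being sent at its natural (much smaller) size.
        */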
    239
    240/* Try to bundle an auth chunk into the packet. */
    241static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
    242					      struct sctp_chunk *chunk)
    243{
    244	struct sctp_association *asoc = pkt->transport->asoc;
    245	enum sctp_xmit retval = SCTP_XMIT_OK;
    246	struct sctp_chunk *auth;
    247
    248	/* if we don't have an association, we can't do authentication */
    249	if (!asoc)
    250		return retval;
    251
    252	/* See if this is an auth chunk we are bundling or if
    253	 * auth is already bundled.
    254	 */
    255	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
    256		return retval;
    257
    258	/* if the peer did not request this chunk to be authenticated,
    259	 * don't do it
    260	 */
    261	if (!chunk->auth)
    262		return retval;
    263
    264	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
    265	if (!auth)
    266		return retval;
    267
    268	auth->shkey = chunk->shkey;
    269	sctp_auth_shkey_hold(auth->shkey);
    270
    271	retval = __sctp_packet_append_chunk(pkt, auth);
    272
    273	if (retval != SCTP_XMIT_OK)
    274		sctp_chunk_free(auth);
    275
    276	return retval;
    277}
    278
    279/* Try to bundle a SACK with the packet. */
    280static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
    281					      struct sctp_chunk *chunk)
    282{
    283	enum sctp_xmit retval = SCTP_XMIT_OK;
    284
    285	/* If sending DATA and haven't already bundled a SACK, try to
    286	 * bundle one into the packet.
    287	 */
    288	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
    289	    !pkt->has_cookie_echo) {
    290		struct sctp_association *asoc;
    291		struct timer_list *timer;
    292		asoc = pkt->transport->asoc;
    293		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
    294
    295		/* If the SACK timer is running, we have a pending SACK */
    296		if (timer_pending(timer)) {
    297			struct sctp_chunk *sack;
    298
    299			if (pkt->transport->sack_generation !=
    300			    pkt->transport->asoc->peer.sack_generation)
    301				return retval;
    302
    303			asoc->a_rwnd = asoc->rwnd;
    304			sack = sctp_make_sack(asoc);
    305			if (sack) {
    306				retval = __sctp_packet_append_chunk(pkt, sack);
    307				if (retval != SCTP_XMIT_OK) {
    308					sctp_chunk_free(sack);
    309					goto out;
    310				}
    311				SCTP_INC_STATS(asoc->base.net,
    312					       SCTP_MIB_OUTCTRLCHUNKS);
    313				asoc->stats.octrlchunks++;
    314				asoc->peer.sack_needed = 0;
    315				if (del_timer(timer))
    316					sctp_association_put(asoc);
    317			}
    318		}
    319	}
    320out:
    321	return retval;
    322}
    323
    324
    325/* Append a chunk to the offered packet reporting back any inability to do
    326 * so.
    327 */
    328static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
    329						 struct sctp_chunk *chunk)
    330{
    331	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
    332	enum sctp_xmit retval = SCTP_XMIT_OK;
    333
    334	/* Check to see if this chunk will fit into the packet */
    335	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
    336	if (retval != SCTP_XMIT_OK)
    337		goto finish;
    338
    339	/* We believe that this chunk is OK to add to the packet */
    340	switch (chunk->chunk_hdr->type) {
    341	case SCTP_CID_DATA:
    342	case SCTP_CID_I_DATA:
    343		/* Account for the data being in the packet */
    344		sctp_packet_append_data(packet, chunk);
    345		/* Disallow SACK bundling after DATA. */
    346		packet->has_sack = 1;
    347		/* Disallow AUTH bundling after DATA */
    348		packet->has_auth = 1;
    349		/* Let it be known that the packet has DATA in it */
    350		packet->has_data = 1;
    351		/* timestamp the chunk for rtx purposes */
    352		chunk->sent_at = jiffies;
    353		/* Mainly used for prsctp RTX policy */
    354		chunk->sent_count++;
    355		break;
    356	case SCTP_CID_COOKIE_ECHO:
    357		packet->has_cookie_echo = 1;
    358		break;
    359
    360	case SCTP_CID_SACK:
    361		packet->has_sack = 1;
    362		if (chunk->asoc)
    363			chunk->asoc->stats.osacks++;
    364		break;
    365
    366	case SCTP_CID_AUTH:
    367		packet->has_auth = 1;
    368		packet->auth = chunk;
    369		break;
    370	}
    371
    372	/* It is OK to send this chunk.  */
    373	list_add_tail(&chunk->list, &packet->chunk_list);
    374	packet->size += chunk_len;
    375	chunk->transport = packet->transport;
    376finish:
    377	return retval;
    378}
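
       /* Sizing note for the append above: chunk_hdr->length is the unpadded
        * chunk length, but chunks are laid out on 4-byte boundaries, so
        * packet->size grows by SCTP_PAD4(length); e.g. a 37-byte chunk consumes
        * 40 bytes of the packet.
        */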
    379
    380/* Append a chunk to the offered packet reporting back any inability to do
    381 * so.
    382 */
    383enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
    384					struct sctp_chunk *chunk)
    385{
    386	enum sctp_xmit retval = SCTP_XMIT_OK;
    387
    388	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
    389
    390	/* Data chunks are special.  Before seeing what else we can
    391	 * bundle into this packet, check to see if we are allowed to
    392	 * send this DATA.
    393	 */
    394	if (sctp_chunk_is_data(chunk)) {
    395		retval = sctp_packet_can_append_data(packet, chunk);
    396		if (retval != SCTP_XMIT_OK)
    397			goto finish;
    398	}
    399
    400	/* Try to bundle AUTH chunk */
    401	retval = sctp_packet_bundle_auth(packet, chunk);
    402	if (retval != SCTP_XMIT_OK)
    403		goto finish;
    404
    405	/* Try to bundle SACK chunk */
    406	retval = sctp_packet_bundle_sack(packet, chunk);
    407	if (retval != SCTP_XMIT_OK)
    408		goto finish;
    409
    410	retval = __sctp_packet_append_chunk(packet, chunk);
    411	if (retval != SCTP_XMIT_OK)
    412		goto finish;
    413
    414	retval = sctp_packet_bundle_pad(packet, chunk);
    415
    416finish:
    417	return retval;
    418}
    419
    420static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
    421{
    422	if (SCTP_OUTPUT_CB(head)->last == head)
    423		skb_shinfo(head)->frag_list = skb;
    424	else
    425		SCTP_OUTPUT_CB(head)->last->next = skb;
    426	SCTP_OUTPUT_CB(head)->last = skb;
    427
    428	head->truesize += skb->truesize;
    429	head->data_len += skb->len;
    430	head->len += skb->len;
    431	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
    432
    433	__skb_header_release(skb);
    434}
    435
    436static int sctp_packet_pack(struct sctp_packet *packet,
    437			    struct sk_buff *head, int gso, gfp_t gfp)
    438{
    439	struct sctp_transport *tp = packet->transport;
    440	struct sctp_auth_chunk *auth = NULL;
    441	struct sctp_chunk *chunk, *tmp;
    442	int pkt_count = 0, pkt_size;
    443	struct sock *sk = head->sk;
    444	struct sk_buff *nskb;
    445	int auth_len = 0;
    446
    447	if (gso) {
    448		skb_shinfo(head)->gso_type = sk->sk_gso_type;
    449		SCTP_OUTPUT_CB(head)->last = head;
    450	} else {
    451		nskb = head;
    452		pkt_size = packet->size;
    453		goto merge;
    454	}
    455
    456	do {
    457		/* calculate the pkt_size and alloc nskb */
    458		pkt_size = packet->overhead;
    459		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
    460					 list) {
    461			int padded = SCTP_PAD4(chunk->skb->len);
    462
    463			if (chunk == packet->auth)
    464				auth_len = padded;
    465			else if (auth_len + padded + packet->overhead >
    466				 tp->pathmtu)
    467				return 0;
    468			else if (pkt_size + padded > tp->pathmtu)
    469				break;
    470			pkt_size += padded;
    471		}
    472		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
    473		if (!nskb)
    474			return 0;
    475		skb_reserve(nskb, packet->overhead + MAX_HEADER);
    476
    477merge:
    478		/* merge chunks into nskb and append nskb into head list */
    479		pkt_size -= packet->overhead;
    480		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
    481			int padding;
    482
    483			list_del_init(&chunk->list);
    484			if (sctp_chunk_is_data(chunk)) {
    485				if (!sctp_chunk_retransmitted(chunk) &&
    486				    !tp->rto_pending) {
    487					chunk->rtt_in_progress = 1;
    488					tp->rto_pending = 1;
    489				}
    490			}
    491
    492			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
    493			if (padding)
    494				skb_put_zero(chunk->skb, padding);
    495
    496			if (chunk == packet->auth)
    497				auth = (struct sctp_auth_chunk *)
    498							skb_tail_pointer(nskb);
    499
    500			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
    501
    502			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
    503				 chunk,
    504				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
    505				 chunk->has_tsn ? "TSN" : "No TSN",
    506				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
    507				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
    508				 chunk->rtt_in_progress);
    509
    510			pkt_size -= SCTP_PAD4(chunk->skb->len);
    511
    512			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
    513				sctp_chunk_free(chunk);
    514
    515			if (!pkt_size)
    516				break;
    517		}
    518
    519		if (auth) {
    520			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
    521						 packet->auth->shkey, gfp);
    522			/* free auth if no more chunks, or add it back */
    523			if (list_empty(&packet->chunk_list))
    524				sctp_chunk_free(packet->auth);
    525			else
    526				list_add(&packet->auth->list,
    527					 &packet->chunk_list);
    528		}
    529
    530		if (gso)
    531			sctp_packet_gso_append(head, nskb);
    532
    533		pkt_count++;
    534	} while (!list_empty(&packet->chunk_list));
    535
    536	if (gso) {
    537		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
    538					sizeof(struct inet6_skb_parm)));
    539		skb_shinfo(head)->gso_segs = pkt_count;
    540		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
    541		goto chksum;
    542	}
    543
    544	if (sctp_checksum_disable)
    545		return 1;
    546
    547	if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
    548	    dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
    549		struct sctphdr *sh =
    550			(struct sctphdr *)skb_transport_header(head);
    551
    552		sh->checksum = sctp_compute_cksum(head, 0);
    553	} else {
    554chksum:
    555		head->ip_summed = CHECKSUM_PARTIAL;
    556		head->csum_not_inet = 1;
    557		head->csum_start = skb_transport_header(head) - head->head;
    558		head->csum_offset = offsetof(struct sctphdr, checksum);
    559	}
    560
    561	return pkt_count;
    562}
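
       /* Checksum handling above, in short: SCTP uses CRC32c over the whole
        * packet. When the device offers NETIF_F_SCTP_CRC and nothing (xfrm, IP
        * fragmentation, UDP encap port) forces a software checksum, the skb is
        * handed down with CHECKSUM_PARTIAL/csum_not_inet so the checksum is
        * filled in later; otherwise sctp_compute_cksum() writes it here. GSO
        * packets always take the deferred path, since each segment needs its
        * own checksum after segmentation.
        */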
    563
    564/* All packets are sent to the network through this function from
    565 * sctp_outq_tail().
    566 *
    567 * The return value is always 0 for now.
    568 */
    569int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
    570{
    571	struct sctp_transport *tp = packet->transport;
    572	struct sctp_association *asoc = tp->asoc;
    573	struct sctp_chunk *chunk, *tmp;
    574	int pkt_count, gso = 0;
    575	struct sk_buff *head;
    576	struct sctphdr *sh;
    577	struct sock *sk;
    578
    579	pr_debug("%s: packet:%p\n", __func__, packet);
    580	if (list_empty(&packet->chunk_list))
    581		return 0;
    582	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
    583	sk = chunk->skb->sk;
    584
    585	if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) {
    586		if (tp->pl.state == SCTP_PL_ERROR) { /* do IP fragmentation if in Error state */
    587			packet->ipfragok = 1;
    588		} else {
    589			if (!sk_can_gso(sk)) { /* check gso */
    590				pr_err_once("Trying to GSO but underlying device doesn't support it.");
    591				goto out;
    592			}
    593			gso = 1;
    594		}
    595	}
    596
    597	/* alloc head skb */
    598	head = alloc_skb((gso ? packet->overhead : packet->size) +
    599			 MAX_HEADER, gfp);
    600	if (!head)
    601		goto out;
    602	skb_reserve(head, packet->overhead + MAX_HEADER);
    603	skb_set_owner_w(head, sk);
    604
    605	/* set sctp header */
    606	sh = skb_push(head, sizeof(struct sctphdr));
    607	skb_reset_transport_header(head);
    608	sh->source = htons(packet->source_port);
    609	sh->dest = htons(packet->destination_port);
    610	sh->vtag = htonl(packet->vtag);
    611	sh->checksum = 0;
    612
    613	/* drop packet if no dst */
    614	if (!tp->dst) {
    615		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
    616		kfree_skb(head);
    617		goto out;
    618	}
    619
    620	/* pack up chunks */
    621	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
    622	if (!pkt_count) {
    623		kfree_skb(head);
    624		goto out;
    625	}
    626	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
    627
    628	/* start autoclose timer */
    629	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
    630	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
    631		struct timer_list *timer =
    632			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
    633		unsigned long timeout =
    634			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
    635
    636		if (!mod_timer(timer, jiffies + timeout))
    637			sctp_association_hold(asoc);
    638	}
    639
    640	/* sctp xmit */
    641	tp->af_specific->ecn_capable(sk);
    642	if (asoc) {
    643		asoc->stats.opackets += pkt_count;
    644		if (asoc->peer.last_sent_to != tp)
    645			asoc->peer.last_sent_to = tp;
    646	}
    647	head->ignore_df = packet->ipfragok;
    648	if (tp->dst_pending_confirm)
    649		skb_set_dst_pending_confirm(head, 1);
    650	/* neighbour should be confirmed on successful transmission or
    651	 * positive error
    652	 */
    653	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
    654	    tp->dst_pending_confirm)
    655		tp->dst_pending_confirm = 0;
    656
    657out:
    658	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
    659		list_del_init(&chunk->list);
    660		if (!sctp_chunk_is_data(chunk))
    661			sctp_chunk_free(chunk);
    662	}
    663	sctp_packet_reset(packet);
    664	return 0;
    665}
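
       /* Note on the cleanup loop above: only control chunks are freed here.
        * DATA chunks stay allocated because the outqueue still owns them and
        * may have to retransmit them until they are acknowledged.
        */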
    666
    667/********************************************************************
    668 * 2nd Level Abstractions
    669 ********************************************************************/
    670
    671/* This private function checks to see if a chunk can be added */
    672static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
    673						  struct sctp_chunk *chunk)
    674{
    675	size_t datasize, rwnd, inflight, flight_size;
    676	struct sctp_transport *transport = packet->transport;
    677	struct sctp_association *asoc = transport->asoc;
    678	struct sctp_outq *q = &asoc->outqueue;
    679
    680	/* RFC 2960 6.1  Transmission of DATA Chunks
    681	 *
    682	 * A) At any given time, the data sender MUST NOT transmit new data to
    683	 * any destination transport address if its peer's rwnd indicates
    684	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
    685	 * 6.2.1).  However, regardless of the value of rwnd (including if it
    686	 * is 0), the data sender can always have one DATA chunk in flight to
    687	 * the receiver if allowed by cwnd (see rule B below).  This rule
    688	 * allows the sender to probe for a change in rwnd that the sender
    689	 * missed due to the SACK having been lost in transit from the data
    690	 * receiver to the data sender.
    691	 */
    692
    693	rwnd = asoc->peer.rwnd;
    694	inflight = q->outstanding_bytes;
    695	flight_size = transport->flight_size;
    696
    697	datasize = sctp_data_size(chunk);
    698
    699	if (datasize > rwnd && inflight > 0)
    700		/* We have (at least) one data chunk in flight,
    701		 * so we can't fall back to rule 6.1 B).
    702		 */
    703		return SCTP_XMIT_RWND_FULL;
    704
    705	/* RFC 2960 6.1  Transmission of DATA Chunks
    706	 *
    707	 * B) At any given time, the sender MUST NOT transmit new data
    708	 * to a given transport address if it has cwnd or more bytes
    709	 * of data outstanding to that transport address.
    710	 */
    711	/* RFC 7.2.4 & the Implementers Guide 2.8.
    712	 *
    713	 * 3) ...
    714	 *    When a Fast Retransmit is being performed the sender SHOULD
    715	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
    716	 */
    717	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
    718	    flight_size >= transport->cwnd)
    719		return SCTP_XMIT_RWND_FULL;
    720
    721	/* Nagle's algorithm to solve small-packet problem:
    722	 * Inhibit the sending of new chunks when new outgoing data arrives
    723	 * if any previously transmitted data on the connection remains
    724	 * unacknowledged.
    725	 */
    726
    727	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
    728	    !asoc->force_delay)
    729		/* Nothing unacked */
    730		return SCTP_XMIT_OK;
    731
    732	if (!sctp_packet_empty(packet))
    733		/* Append to packet */
    734		return SCTP_XMIT_OK;
    735
    736	if (!sctp_state(asoc, ESTABLISHED))
    737		return SCTP_XMIT_OK;
    738
    739	/* Check whether this chunk and all the rest of the pending data will fit,
    740	 * or delay in hopes of bundling a full-sized packet.
    741	 */
    742	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
    743	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
    744		/* Enough data queued to fill a packet */
    745		return SCTP_XMIT_OK;
    746
    747	/* Don't delay large message writes that may have been fragmented */
    748	if (!chunk->msg->can_delay)
    749		return SCTP_XMIT_OK;
    750
    751	/* Defer until all data acked or packet full */
    752	return SCTP_XMIT_DELAY;
    753}
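
       /* A worked example of the "enough data queued" test above, assuming IPv4
        * (32 bytes of IP+SCTP overhead) and plain DATA chunks (16-byte chunk
        * headers): with a 1500-byte path MTU the condition becomes
        *
        *	chunk->skb->len + q->out_qlen > 1500 - 32 - 16 - 4 = 1448
        *
        * i.e. small writes keep being delayed until roughly a full payload's
        * worth of data is queued.
        */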
    754
    755/* This private function does the bookkeeping needed when adding a DATA chunk */
    756static void sctp_packet_append_data(struct sctp_packet *packet,
    757				struct sctp_chunk *chunk)
    758{
    759	struct sctp_transport *transport = packet->transport;
    760	size_t datasize = sctp_data_size(chunk);
    761	struct sctp_association *asoc = transport->asoc;
    762	u32 rwnd = asoc->peer.rwnd;
    763
    764	/* Keep track of how many bytes are in flight over this transport. */
    765	transport->flight_size += datasize;
    766
    767	/* Keep track of how many bytes are in flight to the receiver. */
    768	asoc->outqueue.outstanding_bytes += datasize;
    769
    770	/* Update our view of the receiver's rwnd. */
    771	if (datasize < rwnd)
    772		rwnd -= datasize;
    773	else
    774		rwnd = 0;
    775
    776	asoc->peer.rwnd = rwnd;
    777	sctp_chunk_assign_tsn(chunk);
    778	asoc->stream.si->assign_number(chunk);
    779}
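
       /* Bookkeeping note for the above: shrinking our local copy of the peer's
        * rwnd here is what lets later sctp_packet_can_append_data() calls see
        * the reduced window before any SACK arrives to refresh it.
        */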
    780
    781static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
    782					   struct sctp_chunk *chunk,
    783					   u16 chunk_len)
    784{
    785	enum sctp_xmit retval = SCTP_XMIT_OK;
    786	size_t psize, pmtu, maxsize;
    787
    788	/* Don't bundle in this packet if this chunk's auth key doesn't
    789	 * match other chunks already enqueued on this packet. Also,
    790	 * don't bundle the chunk with auth key if other chunks in this
    791	 * packet don't have auth key.
    792	 */
    793	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
    794	    (!packet->auth && chunk->shkey &&
    795	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
    796		return SCTP_XMIT_PMTU_FULL;
    797
    798	psize = packet->size;
    799	if (packet->transport->asoc)
    800		pmtu = packet->transport->asoc->pathmtu;
    801	else
    802		pmtu = packet->transport->pathmtu;
    803
    804	/* Decide if we need to fragment or resubmit later. */
    805	if (psize + chunk_len > pmtu) {
    806		/* It's OK to fragment at IP level if any one of the following
    807		 * is true:
    808		 *	1. The packet is empty (meaning this chunk is greater
    809		 *	   than the MTU)
    810		 *	2. The packet doesn't have any data in it yet and data
    811		 *	   requires authentication.
    812		 */
    813		if (sctp_packet_empty(packet) ||
    814		    (!packet->has_data && chunk->auth)) {
    815			/* We no longer do re-fragmentation.
    816			 * Just fragment at the IP layer, if we
    817			 * actually hit this condition
    818			 */
    819			packet->ipfragok = 1;
    820			goto out;
    821		}
    822
    823		/* Similarly, if this chunk was built before a PMTU
    824		 * reduction, we have to fragment it at IP level now. So
    825		 * if the packet already contains something, we need to
    826		 * flush.
    827		 */
    828		maxsize = pmtu - packet->overhead;
    829		if (packet->auth)
    830			maxsize -= SCTP_PAD4(packet->auth->skb->len);
    831		if (chunk_len > maxsize)
    832			retval = SCTP_XMIT_PMTU_FULL;
    833
    834		/* It is also okay to fragment if the chunk we are
    835		 * adding is a control chunk, but only if the current packet
    836		 * is not a GSO one, as otherwise it would cause fragmentation
    837		 * of a large frame. So in this case we allow the
    838		 * fragmentation by forcing it to be in a new packet.
    839		 */
    840		if (!sctp_chunk_is_data(chunk) && packet->has_data)
    841			retval = SCTP_XMIT_PMTU_FULL;
    842
    843		if (psize + chunk_len > packet->max_size)
    844			/* Hit GSO/PMTU limit, gotta flush */
    845			retval = SCTP_XMIT_PMTU_FULL;
    846
    847		if (!packet->transport->burst_limited &&
    848		    psize + chunk_len > (packet->transport->cwnd >> 1))
    849			/* Do not allow a single GSO packet to use more
    850			 * than half of cwnd.
    851			 */
    852			retval = SCTP_XMIT_PMTU_FULL;
    853
    854		if (packet->transport->burst_limited &&
    855		    psize + chunk_len > (packet->transport->burst_limited >> 1))
    856			/* Do not allow a single GSO packet to use more
    857			 * than half of original cwnd.
    858			 */
    859			retval = SCTP_XMIT_PMTU_FULL;
    860		/* Otherwise it will fit in the GSO packet */
    861	}
    862
    863out:
    864	return retval;
    865}
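
       /* A numeric sketch of the flush decision above, assuming IPv4 (32 bytes
        * of overhead), no AUTH chunk and a 1500-byte PMTU: maxsize is 1468, so
        * a chunk of up to 1468 bytes that does not fit in the current packet
        * just returns SCTP_XMIT_PMTU_FULL (flush and retry), while a larger
        * chunk can only go out with ipfragok set on an otherwise empty packet.
        */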