cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

outqueue.c (56928B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/* SCTP kernel implementation
      3 * (C) Copyright IBM Corp. 2001, 2004
      4 * Copyright (c) 1999-2000 Cisco, Inc.
      5 * Copyright (c) 1999-2001 Motorola, Inc.
      6 * Copyright (c) 2001-2003 Intel Corp.
      7 *
      8 * This file is part of the SCTP kernel implementation
      9 *
     10 * These functions implement the sctp_outq class.   The outqueue handles
     11 * bundling and queueing of outgoing SCTP chunks.
     12 *
     13 * Please send any bug reports or fixes you make to the
     14 * email address(es):
     15 *    lksctp developers <linux-sctp@vger.kernel.org>
     16 *
     17 * Written or modified by:
     18 *    La Monte H.P. Yarroll <piggy@acm.org>
     19 *    Karl Knutson          <karl@athena.chicago.il.us>
     20 *    Perry Melange         <pmelange@null.cc.uic.edu>
     21 *    Xingang Guo           <xingang.guo@intel.com>
     22 *    Hui Huang 	    <hui.huang@nokia.com>
     23 *    Sridhar Samudrala     <sri@us.ibm.com>
     24 *    Jon Grimm             <jgrimm@us.ibm.com>
     25 */
     26
     27#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     28
     29#include <linux/types.h>
     30#include <linux/list.h>   /* For struct list_head */
     31#include <linux/socket.h>
     32#include <linux/ip.h>
     33#include <linux/slab.h>
     34#include <net/sock.h>	  /* For skb_set_owner_w */
     35
     36#include <net/sctp/sctp.h>
     37#include <net/sctp/sm.h>
     38#include <net/sctp/stream_sched.h>
     39#include <trace/events/sctp.h>
     40
     41/* Declare internal functions here.  */
     42static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
     43static void sctp_check_transmitted(struct sctp_outq *q,
     44				   struct list_head *transmitted_queue,
     45				   struct sctp_transport *transport,
     46				   union sctp_addr *saddr,
     47				   struct sctp_sackhdr *sack,
     48				   __u32 *highest_new_tsn);
     49
     50static void sctp_mark_missing(struct sctp_outq *q,
     51			      struct list_head *transmitted_queue,
     52			      struct sctp_transport *transport,
     53			      __u32 highest_new_tsn,
     54			      int count_of_newacks);
     55
     56static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
     57
     58/* Add data to the front of the queue. */
     59static inline void sctp_outq_head_data(struct sctp_outq *q,
     60				       struct sctp_chunk *ch)
     61{
     62	struct sctp_stream_out_ext *oute;
     63	__u16 stream;
     64
     65	list_add(&ch->list, &q->out_chunk_list);
     66	q->out_qlen += ch->skb->len;
     67
     68	stream = sctp_chunk_stream_no(ch);
     69	oute = SCTP_SO(&q->asoc->stream, stream)->ext;
     70	list_add(&ch->stream_list, &oute->outq);
     71}
     72
     73/* Take data from the front of the queue. */
     74static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
     75{
     76	return q->sched->dequeue(q);
     77}
     78
     79/* Add data chunk to the end of the queue. */
     80static inline void sctp_outq_tail_data(struct sctp_outq *q,
     81				       struct sctp_chunk *ch)
     82{
     83	struct sctp_stream_out_ext *oute;
     84	__u16 stream;
     85
     86	list_add_tail(&ch->list, &q->out_chunk_list);
     87	q->out_qlen += ch->skb->len;
     88
     89	stream = sctp_chunk_stream_no(ch);
     90	oute = SCTP_SO(&q->asoc->stream, stream)->ext;
     91	list_add_tail(&ch->stream_list, &oute->outq);
     92}
     93
     94/*
     95 * SFR-CACC algorithm:
     96 * D) If count_of_newacks is greater than or equal to 2
     97 * and t was not sent to the current primary then the
     98 * sender MUST NOT increment missing report count for t.
     99 */
    100static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
    101				       struct sctp_transport *transport,
    102				       int count_of_newacks)
    103{
    104	if (count_of_newacks >= 2 && transport != primary)
    105		return 1;
    106	return 0;
    107}
    108
    109/*
    110 * SFR-CACC algorithm:
    111 * F) If count_of_newacks is less than 2, let d be the
    112 * destination to which t was sent. If cacc_saw_newack
    113 * is 0 for destination d, then the sender MUST NOT
    114 * increment missing report count for t.
    115 */
    116static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
    117				       int count_of_newacks)
    118{
    119	if (count_of_newacks < 2 &&
    120			(transport && !transport->cacc.cacc_saw_newack))
    121		return 1;
    122	return 0;
    123}
    124
    125/*
    126 * SFR-CACC algorithm:
    127 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
    128 * execute steps C, D, F.
    129 *
    130 * C has been implemented in sctp_outq_sack
    131 */
    132static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
    133				     struct sctp_transport *transport,
    134				     int count_of_newacks)
    135{
    136	if (!primary->cacc.cycling_changeover) {
    137		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
    138			return 1;
    139		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
    140			return 1;
    141		return 0;
    142	}
    143	return 0;
    144}
    145
    146/*
    147 * SFR-CACC algorithm:
    148 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
    149 * than next_tsn_at_change of the current primary, then
    150 * the sender MUST NOT increment missing report count
    151 * for t.
    152 */
    153static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
    154{
    155	if (primary->cacc.cycling_changeover &&
    156	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
    157		return 1;
    158	return 0;
    159}
    160
    161/*
    162 * SFR-CACC algorithm:
    163 * 3) If the missing report count for TSN t is to be
    164 * incremented according to [RFC2960] and
    165 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
    166 * then the sender MUST further execute steps 3.1 and
    167 * 3.2 to determine if the missing report count for
    168 * TSN t SHOULD NOT be incremented.
    169 *
    170 * 3.3) If 3.1 and 3.2 do not dictate that the missing
    171 * report count for t should not be incremented, then
    172 * the sender SHOULD increment missing report count for
    173 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
    174 */
    175static inline int sctp_cacc_skip(struct sctp_transport *primary,
    176				 struct sctp_transport *transport,
    177				 int count_of_newacks,
    178				 __u32 tsn)
    179{
    180	if (primary->cacc.changeover_active &&
    181	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
    182	     sctp_cacc_skip_3_2(primary, tsn)))
    183		return 1;
    184	return 0;
    185}
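       /* For illustration: suppose a changeover is active, CYCLING_CHANGEOVER
        * is clear, and a SACK newly acks data on two destinations, so
        * count_of_newacks == 2.  For a TSN t that was sent to a transport
        * other than the current primary, rule D (3.1) applies and
        * sctp_cacc_skip() returns 1, so t's missing report is not
        * incremented.  With CYCLING_CHANGEOVER set instead, only TSNs below
        * next_tsn_at_change of the primary are skipped (rule 3.2).
        */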
    186
    187/* Initialize an existing sctp_outq.  This does the boring stuff.
    188 * You still need to define handlers if you really want to DO
    189 * something with this structure...
    190 */
    191void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
    192{
    193	memset(q, 0, sizeof(struct sctp_outq));
    194
    195	q->asoc = asoc;
    196	INIT_LIST_HEAD(&q->out_chunk_list);
    197	INIT_LIST_HEAD(&q->control_chunk_list);
    198	INIT_LIST_HEAD(&q->retransmit);
    199	INIT_LIST_HEAD(&q->sacked);
    200	INIT_LIST_HEAD(&q->abandoned);
    201	sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);
    202}
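       /* Typical lifecycle, as an illustrative sketch only (the actual call
        * sites live in the association and state-machine code elsewhere in
        * the SCTP stack):
        *
        *	sctp_outq_init(asoc, &asoc->outqueue);
        *	...
        *	sctp_outq_tail(&asoc->outqueue, chunk, GFP_ATOMIC);
        *	sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
        *	...
        *	sctp_outq_free(&asoc->outqueue);
        */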
    203
    204/* Free the outqueue structure and any related pending chunks.
    205 */
    206static void __sctp_outq_teardown(struct sctp_outq *q)
    207{
    208	struct sctp_transport *transport;
    209	struct list_head *lchunk, *temp;
    210	struct sctp_chunk *chunk, *tmp;
    211
    212	/* Throw away unacknowledged chunks. */
    213	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
    214			transports) {
    215		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
    216			chunk = list_entry(lchunk, struct sctp_chunk,
    217					   transmitted_list);
    218			/* Mark as part of a failed message. */
    219			sctp_chunk_fail(chunk, q->error);
    220			sctp_chunk_free(chunk);
    221		}
    222	}
    223
    224	/* Throw away chunks that have been gap ACKed.  */
    225	list_for_each_safe(lchunk, temp, &q->sacked) {
    226		list_del_init(lchunk);
    227		chunk = list_entry(lchunk, struct sctp_chunk,
    228				   transmitted_list);
    229		sctp_chunk_fail(chunk, q->error);
    230		sctp_chunk_free(chunk);
    231	}
    232
    233	/* Throw away any chunks in the retransmit queue. */
    234	list_for_each_safe(lchunk, temp, &q->retransmit) {
    235		list_del_init(lchunk);
    236		chunk = list_entry(lchunk, struct sctp_chunk,
    237				   transmitted_list);
    238		sctp_chunk_fail(chunk, q->error);
    239		sctp_chunk_free(chunk);
    240	}
    241
    242	/* Throw away any chunks that are in the abandoned queue. */
    243	list_for_each_safe(lchunk, temp, &q->abandoned) {
    244		list_del_init(lchunk);
    245		chunk = list_entry(lchunk, struct sctp_chunk,
    246				   transmitted_list);
    247		sctp_chunk_fail(chunk, q->error);
    248		sctp_chunk_free(chunk);
    249	}
    250
    251	/* Throw away any leftover data chunks. */
    252	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
    253		sctp_sched_dequeue_done(q, chunk);
    254
    255		/* Mark as send failure. */
    256		sctp_chunk_fail(chunk, q->error);
    257		sctp_chunk_free(chunk);
    258	}
    259
    260	/* Throw away any leftover control chunks. */
    261	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
    262		list_del_init(&chunk->list);
    263		sctp_chunk_free(chunk);
    264	}
    265}
    266
    267void sctp_outq_teardown(struct sctp_outq *q)
    268{
    269	__sctp_outq_teardown(q);
    270	sctp_outq_init(q->asoc, q);
    271}
    272
    273/* Free the outqueue structure and any related pending chunks.  */
    274void sctp_outq_free(struct sctp_outq *q)
    275{
    276	/* Throw away leftover chunks. */
    277	__sctp_outq_teardown(q);
    278}
    279
    280/* Put a new chunk in an sctp_outq.  */
    281void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
    282{
    283	struct net *net = q->asoc->base.net;
    284
    285	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
    286		 chunk && chunk->chunk_hdr ?
    287		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
    288		 "illegal chunk");
    289
    290	/* If it is data, queue it up; otherwise, send it
    291	 * immediately.
    292	 */
    293	if (sctp_chunk_is_data(chunk)) {
    294		pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s]\n",
    295			 __func__, q, chunk, chunk && chunk->chunk_hdr ?
    296			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
    297			 "illegal chunk");
    298
    299		sctp_outq_tail_data(q, chunk);
    300		if (chunk->asoc->peer.prsctp_capable &&
    301		    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
    302			chunk->asoc->sent_cnt_removable++;
    303		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
    304			SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
    305		else
    306			SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
    307	} else {
    308		list_add_tail(&chunk->list, &q->control_chunk_list);
    309		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
    310	}
    311
    312	if (!q->cork)
    313		sctp_outq_flush(q, 0, gfp);
    314}
    315
    316/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
    317 * and the abandoned list are in ascending order.
    318 */
    319static void sctp_insert_list(struct list_head *head, struct list_head *new)
    320{
    321	struct list_head *pos;
    322	struct sctp_chunk *nchunk, *lchunk;
    323	__u32 ntsn, ltsn;
    324	int done = 0;
    325
    326	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
    327	ntsn = ntohl(nchunk->subh.data_hdr->tsn);
    328
    329	list_for_each(pos, head) {
    330		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
    331		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
    332		if (TSN_lt(ntsn, ltsn)) {
    333			list_add(new, pos->prev);
    334			done = 1;
    335			break;
    336		}
    337	}
    338	if (!done)
    339		list_add_tail(new, head);
    340}
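       /* For illustration: with a list holding TSNs 100, 102 and 103,
        * inserting TSN 101 lands between 100 and 102, while TSN 104 goes to
        * the tail.  Since the comparison uses TSN_lt() (serial number
        * arithmetic), the ordering also stays correct across the 32-bit TSN
        * wrap, e.g. 0xfffffffe sorts before 0x00000001.
        */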
    341
    342static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
    343				  struct sctp_sndrcvinfo *sinfo,
    344				  struct list_head *queue, int msg_len)
    345{
    346	struct sctp_chunk *chk, *temp;
    347
    348	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
    349		struct sctp_stream_out *streamout;
    350
    351		if (!chk->msg->abandoned &&
    352		    (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
    353		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
    354			continue;
    355
    356		chk->msg->abandoned = 1;
    357		list_del_init(&chk->transmitted_list);
    358		sctp_insert_list(&asoc->outqueue.abandoned,
    359				 &chk->transmitted_list);
    360
    361		streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
    362		asoc->sent_cnt_removable--;
    363		asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
    364		streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
    365
    366		if (queue != &asoc->outqueue.retransmit &&
    367		    !chk->tsn_gap_acked) {
    368			if (chk->transport)
    369				chk->transport->flight_size -=
    370						sctp_data_size(chk);
    371			asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
    372		}
    373
    374		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
    375		if (msg_len <= 0)
    376			break;
    377	}
    378
    379	return msg_len;
    380}
    381
    382static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
    383				    struct sctp_sndrcvinfo *sinfo, int msg_len)
    384{
    385	struct sctp_outq *q = &asoc->outqueue;
    386	struct sctp_chunk *chk, *temp;
    387
    388	q->sched->unsched_all(&asoc->stream);
    389
    390	list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
    391		if (!chk->msg->abandoned &&
    392		    (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
    393		     !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
    394		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
    395			continue;
    396
    397		chk->msg->abandoned = 1;
    398		sctp_sched_dequeue_common(q, chk);
    399		asoc->sent_cnt_removable--;
    400		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
    401		if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
    402			struct sctp_stream_out *streamout =
    403				SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
    404
    405			streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
    406		}
    407
    408		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
    409		sctp_chunk_free(chk);
    410		if (msg_len <= 0)
    411			break;
    412	}
    413
    414	q->sched->sched_all(&asoc->stream);
    415
    416	return msg_len;
    417}
    418
    419/* Abandon the chunks according to their priorities */
    420void sctp_prsctp_prune(struct sctp_association *asoc,
    421		       struct sctp_sndrcvinfo *sinfo, int msg_len)
    422{
    423	struct sctp_transport *transport;
    424
    425	if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
    426		return;
    427
    428	msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
    429					 &asoc->outqueue.retransmit,
    430					 msg_len);
    431	if (msg_len <= 0)
    432		return;
    433
    434	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
    435			    transports) {
    436		msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
    437						 &transport->transmitted,
    438						 msg_len);
    439		if (msg_len <= 0)
    440			return;
    441	}
    442
    443	sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
    444}
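       /* Note on the pruning above: msg_len is the number of bytes that still
        * have to be reclaimed; each abandoned chunk's skb->truesize plus the
        * chunk overhead is subtracted from it, and pruning stops once it
        * drops to zero or below.  Sent chunks (retransmit queue, then each
        * transport's transmitted queue) are abandoned before unsent ones.
        */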
    445
    446/* Mark all the eligible packets on a transport for retransmission.  */
    447void sctp_retransmit_mark(struct sctp_outq *q,
    448			  struct sctp_transport *transport,
    449			  __u8 reason)
    450{
    451	struct list_head *lchunk, *ltemp;
    452	struct sctp_chunk *chunk;
    453
    454	/* Walk through the specified transmitted queue.  */
    455	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
    456		chunk = list_entry(lchunk, struct sctp_chunk,
    457				   transmitted_list);
    458
    459		/* If the chunk is abandoned, move it to abandoned list. */
    460		if (sctp_chunk_abandoned(chunk)) {
    461			list_del_init(lchunk);
    462			sctp_insert_list(&q->abandoned, lchunk);
    463
    464			/* If this chunk has not been previously acked,
    465			 * stop considering it 'outstanding'.  Our peer
    466			 * will most likely never see it since it will
    467			 * not be retransmitted
    468			 */
    469			if (!chunk->tsn_gap_acked) {
    470				if (chunk->transport)
    471					chunk->transport->flight_size -=
    472							sctp_data_size(chunk);
    473				q->outstanding_bytes -= sctp_data_size(chunk);
    474				q->asoc->peer.rwnd += sctp_data_size(chunk);
    475			}
    476			continue;
    477		}
    478
    479		/* If we are doing  retransmission due to a timeout or pmtu
    480		 * discovery, only the  chunks that are not yet acked should
    481		 * be added to the retransmit queue.
    482		 */
    483		if ((reason == SCTP_RTXR_FAST_RTX  &&
    484			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
    485		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
    486			/* RFC 2960 6.2.1 Processing a Received SACK
    487			 *
    488			 * C) Any time a DATA chunk is marked for
    489			 * retransmission (via either T3-rtx timer expiration
    490			 * (Section 6.3.3) or via fast retransmit
    491			 * (Section 7.2.4)), add the data size of those
    492			 * chunks to the rwnd.
    493			 */
    494			q->asoc->peer.rwnd += sctp_data_size(chunk);
    495			q->outstanding_bytes -= sctp_data_size(chunk);
    496			if (chunk->transport)
    497				transport->flight_size -= sctp_data_size(chunk);
    498
    499			/* sctpimpguide-05 Section 2.8.2
    500			 * M5) If a T3-rtx timer expires, the
    501			 * 'TSN.Missing.Report' of all affected TSNs is set
    502			 * to 0.
    503			 */
    504			chunk->tsn_missing_report = 0;
    505
    506			/* If a chunk that is being used for RTT measurement
    507			 * has to be retransmitted, we cannot use this chunk
    508			 * anymore for RTT measurements. Reset rto_pending so
    509			 * that a new RTT measurement is started when a new
    510			 * data chunk is sent.
    511			 */
    512			if (chunk->rtt_in_progress) {
    513				chunk->rtt_in_progress = 0;
    514				transport->rto_pending = 0;
    515			}
    516
    517			/* Move the chunk to the retransmit queue. The chunks
    518			 * on the retransmit queue are always kept in order.
    519			 */
    520			list_del_init(lchunk);
    521			sctp_insert_list(&q->retransmit, lchunk);
    522		}
    523	}
    524
    525	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
    526		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
    527		 transport->cwnd, transport->ssthresh, transport->flight_size,
    528		 transport->partial_bytes_acked);
    529}
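       /* Net effect of the marking above: abandoned chunks end up on
        * q->abandoned, eligible chunks end up TSN-ordered on q->retransmit,
        * and for chunks that were still counted as outstanding the peer rwnd,
        * q->outstanding_bytes and the transport's flight_size are adjusted as
        * described by RFC 2960 6.2.1 C) quoted above.
        */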
    530
    531/* Mark all the eligible packets on a transport for retransmission and force
    532 * one packet out.
    533 */
    534void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
    535		     enum sctp_retransmit_reason reason)
    536{
    537	struct net *net = q->asoc->base.net;
    538
    539	switch (reason) {
    540	case SCTP_RTXR_T3_RTX:
    541		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
    542		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
    543		/* Update the retran path if the T3-rtx timer has expired for
    544		 * the current retran path.
    545		 */
    546		if (transport == transport->asoc->peer.retran_path)
    547			sctp_assoc_update_retran_path(transport->asoc);
    548		transport->asoc->rtx_data_chunks +=
    549			transport->asoc->unack_data;
    550		if (transport->pl.state == SCTP_PL_COMPLETE &&
    551		    transport->asoc->unack_data)
    552			sctp_transport_reset_probe_timer(transport);
    553		break;
    554	case SCTP_RTXR_FAST_RTX:
    555		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
    556		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
    557		q->fast_rtx = 1;
    558		break;
    559	case SCTP_RTXR_PMTUD:
    560		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
    561		break;
    562	case SCTP_RTXR_T1_RTX:
    563		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
    564		transport->asoc->init_retries++;
    565		break;
    566	default:
    567		BUG();
    568	}
    569
    570	sctp_retransmit_mark(q, transport, reason);
    571
    572	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
    573	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
    574	 * following the procedures outlined in C1 - C5.
    575	 */
    576	if (reason == SCTP_RTXR_T3_RTX)
    577		q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);
    578
    579	/* Flush the queues only on timeout, since fast_rtx is only
    580	 * triggered during sack processing and the queue
    581	 * will be flushed at the end.
    582	 */
    583	if (reason != SCTP_RTXR_FAST_RTX)
    584		sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
    585}
    586
    587/*
    588 * Transmit DATA chunks on the retransmit queue.  Upon return from
    589 * __sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
    590 * need to be transmitted by the caller.
    591 * We assume that pkt->transport has already been set.
    592 *
    593 * The return value is a normal kernel error return value.
    594 */
    595static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
    596				 int rtx_timeout, int *start_timer, gfp_t gfp)
    597{
    598	struct sctp_transport *transport = pkt->transport;
    599	struct sctp_chunk *chunk, *chunk1;
    600	struct list_head *lqueue;
    601	enum sctp_xmit status;
    602	int error = 0;
    603	int timer = 0;
    604	int done = 0;
    605	int fast_rtx;
    606
    607	lqueue = &q->retransmit;
    608	fast_rtx = q->fast_rtx;
    609
    610	/* This loop handles time-out retransmissions, fast retransmissions,
    611	 * and retransmissions due to opening of the window.
    612	 *
    613	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
    614	 *
    615	 * E3) Determine how many of the earliest (i.e., lowest TSN)
    616	 * outstanding DATA chunks for the address for which the
    617	 * T3-rtx has expired will fit into a single packet, subject
    618	 * to the MTU constraint for the path corresponding to the
    619	 * destination transport address to which the retransmission
    620	 * is being sent (this may be different from the address for
    621	 * which the timer expires [see Section 6.4]). Call this value
    622	 * K. Bundle and retransmit those K DATA chunks in a single
    623	 * packet to the destination endpoint.
    624	 *
    625	 * [Just to be painfully clear, if we are retransmitting
    626	 * because a timeout just happened, we should send only ONE
    627	 * packet of retransmitted data.]
    628	 *
    629	 * For fast retransmissions we also send only ONE packet.  However,
    630	 * if we are just flushing the queue due to open window, we'll
    631	 * try to send as much as possible.
    632	 */
    633	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
    634		/* If the chunk is abandoned, move it to abandoned list. */
    635		if (sctp_chunk_abandoned(chunk)) {
    636			list_del_init(&chunk->transmitted_list);
    637			sctp_insert_list(&q->abandoned,
    638					 &chunk->transmitted_list);
    639			continue;
    640		}
    641
    642		/* Make sure that Gap Acked TSNs are not retransmitted.  A
    643		 * simple approach is just to move such TSNs out of the
    644		 * way and into a 'transmitted' queue and skip to the
    645		 * next chunk.
    646		 */
    647		if (chunk->tsn_gap_acked) {
    648			list_move_tail(&chunk->transmitted_list,
    649				       &transport->transmitted);
    650			continue;
    651		}
    652
    653		/* If we are doing fast retransmit, ignore non-fast_retransmit
    654		 * chunks
    655		 */
    656		if (fast_rtx && !chunk->fast_retransmit)
    657			continue;
    658
    659redo:
    660		/* Attempt to append this chunk to the packet. */
    661		status = sctp_packet_append_chunk(pkt, chunk);
    662
    663		switch (status) {
    664		case SCTP_XMIT_PMTU_FULL:
    665			if (!pkt->has_data && !pkt->has_cookie_echo) {
    666				/* If this packet did not contain DATA then
    667				 * retransmission did not happen, so do it
    668				 * again.  We'll ignore the error here since
    669				 * control chunks are already freed so there
    670				 * is nothing we can do.
    671				 */
    672				sctp_packet_transmit(pkt, gfp);
    673				goto redo;
    674			}
    675
    676			/* Send this packet.  */
    677			error = sctp_packet_transmit(pkt, gfp);
    678
    679			/* If we are retransmitting, we should only
    680			 * send a single packet.
    681			 * Otherwise, try appending this chunk again.
    682			 */
    683			if (rtx_timeout || fast_rtx)
    684				done = 1;
    685			else
    686				goto redo;
    687
    688			/* Bundle next chunk in the next round.  */
    689			break;
    690
    691		case SCTP_XMIT_RWND_FULL:
    692			/* Send this packet. */
    693			error = sctp_packet_transmit(pkt, gfp);
    694
    695			/* Stop sending DATA as there is no more room
    696			 * at the receiver.
    697			 */
    698			done = 1;
    699			break;
    700
    701		case SCTP_XMIT_DELAY:
    702			/* Send this packet. */
    703			error = sctp_packet_transmit(pkt, gfp);
    704
    705			/* Stop sending DATA because of nagle delay. */
    706			done = 1;
    707			break;
    708
    709		default:
    710			/* The append was successful, so add this chunk to
    711			 * the transmitted list.
    712			 */
    713			list_move_tail(&chunk->transmitted_list,
    714				       &transport->transmitted);
    715
    716			/* Mark the chunk as ineligible for fast retransmit
    717			 * after it is retransmitted.
    718			 */
    719			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
    720				chunk->fast_retransmit = SCTP_DONT_FRTX;
    721
    722			q->asoc->stats.rtxchunks++;
    723			break;
    724		}
    725
    726		/* Set the timer if there were no errors */
    727		if (!error && !timer)
    728			timer = 1;
    729
    730		if (done)
    731			break;
    732	}
    733
    734	/* If we are here due to a retransmit timeout or a fast
    735	 * retransmit and if there are any chunks left in the retransmit
    736	 * queue that could not fit in the PMTU sized packet, they need
    737	 * to be marked as ineligible for a subsequent fast retransmit.
    738	 */
    739	if (rtx_timeout || fast_rtx) {
    740		list_for_each_entry(chunk1, lqueue, transmitted_list) {
    741			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
    742				chunk1->fast_retransmit = SCTP_DONT_FRTX;
    743		}
    744	}
    745
    746	*start_timer = timer;
    747
    748	/* Clear fast retransmit hint */
    749	if (fast_rtx)
    750		q->fast_rtx = 0;
    751
    752	return error;
    753}
    754
    755/* Uncork the outqueue and flush any chunks queued while it was corked. */
    756void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
    757{
    758	if (q->cork)
    759		q->cork = 0;
    760
    761	sctp_outq_flush(q, 0, gfp);
    762}
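       /* Cork/uncork pattern: while q->cork is set, sctp_outq_tail() only
        * queues chunks instead of flushing; clearing the cork here and
        * flushing lets chunks queued back-to-back under the socket lock be
        * bundled into as few packets as possible.
        */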
    763
    764static int sctp_packet_singleton(struct sctp_transport *transport,
    765				 struct sctp_chunk *chunk, gfp_t gfp)
    766{
    767	const struct sctp_association *asoc = transport->asoc;
    768	const __u16 sport = asoc->base.bind_addr.port;
    769	const __u16 dport = asoc->peer.port;
    770	const __u32 vtag = asoc->peer.i.init_tag;
    771	struct sctp_packet singleton;
    772
    773	sctp_packet_init(&singleton, transport, sport, dport);
    774	sctp_packet_config(&singleton, vtag, 0);
    775	if (sctp_packet_append_chunk(&singleton, chunk) != SCTP_XMIT_OK) {
    776		list_del_init(&chunk->list);
    777		sctp_chunk_free(chunk);
    778		return -ENOMEM;
    779	}
    780	return sctp_packet_transmit(&singleton, gfp);
    781}
    782
    783/* Struct to hold the context during sctp outq flush */
    784struct sctp_flush_ctx {
    785	struct sctp_outq *q;
    786	/* Current transport being used. It's NOT the same as the current active path. */
    787	struct sctp_transport *transport;
    788	/* These transports have chunks to send. */
    789	struct list_head transport_list;
    790	struct sctp_association *asoc;
    791	/* Packet on the current transport above */
    792	struct sctp_packet *packet;
    793	gfp_t gfp;
    794};
    795
    796/* transport: current transport */
    797static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
    798				       struct sctp_chunk *chunk)
    799{
    800	struct sctp_transport *new_transport = chunk->transport;
    801
    802	if (!new_transport) {
    803		if (!sctp_chunk_is_data(chunk)) {
    804			/* If we have a prior transport pointer, see if
    805			 * the destination address of the chunk
    806			 * matches the destination address of the
    807			 * current transport.  If not a match, then
    808			 * try to look up the transport with a given
    809			 * destination address.  We do this because
    810			 * after processing ASCONFs, we may have new
    811			 * transports created.
    812			 */
    813			if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
    814							&ctx->transport->ipaddr))
    815				new_transport = ctx->transport;
    816			else
    817				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
    818								  &chunk->dest);
    819		}
    820
    821		/* if we still don't have a new transport, then
    822		 * use the current active path.
    823		 */
    824		if (!new_transport)
    825			new_transport = ctx->asoc->peer.active_path;
    826	} else {
    827		__u8 type;
    828
    829		switch (new_transport->state) {
    830		case SCTP_INACTIVE:
    831		case SCTP_UNCONFIRMED:
    832		case SCTP_PF:
    833			/* If the chunk is Heartbeat or Heartbeat Ack,
    834			 * send it to chunk->transport, even if it's
    835			 * inactive.
    836			 *
    837			 * 3.3.6 Heartbeat Acknowledgement:
    838			 * ...
    839			 * A HEARTBEAT ACK is always sent to the source IP
    840			 * address of the IP datagram containing the
    841			 * HEARTBEAT chunk to which this ack is responding.
    842			 * ...
    843			 *
    844			 * ASCONF_ACKs also must be sent to the source.
    845			 */
    846			type = chunk->chunk_hdr->type;
    847			if (type != SCTP_CID_HEARTBEAT &&
    848			    type != SCTP_CID_HEARTBEAT_ACK &&
    849			    type != SCTP_CID_ASCONF_ACK)
    850				new_transport = ctx->asoc->peer.active_path;
    851			break;
    852		default:
    853			break;
    854		}
    855	}
    856
    857	/* Are we switching transports? Take care of transport locks. */
    858	if (new_transport != ctx->transport) {
    859		ctx->transport = new_transport;
    860		ctx->packet = &ctx->transport->packet;
    861
    862		if (list_empty(&ctx->transport->send_ready))
    863			list_add_tail(&ctx->transport->send_ready,
    864				      &ctx->transport_list);
    865
    866		sctp_packet_config(ctx->packet,
    867				   ctx->asoc->peer.i.init_tag,
    868				   ctx->asoc->peer.ecn_capable);
    869		/* We've switched transports, so apply the
    870		 * Burst limit to the new transport.
    871		 */
    872		sctp_transport_burst_limited(ctx->transport);
    873	}
    874}
    875
    876static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
    877{
    878	struct sctp_chunk *chunk, *tmp;
    879	enum sctp_xmit status;
    880	int one_packet, error;
    881
    882	list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
    883		one_packet = 0;
    884
    885		/* RFC 5061, 5.3
    886		 * F1) This means that until such time as the ASCONF
    887		 * containing the add is acknowledged, the sender MUST
    888		 * NOT use the new IP address as a source for ANY SCTP
    889		 * packet except on carrying an ASCONF Chunk.
    890		 */
    891		if (ctx->asoc->src_out_of_asoc_ok &&
    892		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
    893			continue;
    894
    895		list_del_init(&chunk->list);
    896
    897		/* Pick the right transport to use. This always switches transports
    898		 * for the first chunk, since we do not yet have a transport at that point.
    899		 */
    900		sctp_outq_select_transport(ctx, chunk);
    901
    902		switch (chunk->chunk_hdr->type) {
    903		/* 6.10 Bundling
    904		 *   ...
    905		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
    906		 *   COMPLETE with any other chunks.  [Send them immediately.]
    907		 */
    908		case SCTP_CID_INIT:
    909		case SCTP_CID_INIT_ACK:
    910		case SCTP_CID_SHUTDOWN_COMPLETE:
    911			error = sctp_packet_singleton(ctx->transport, chunk,
    912						      ctx->gfp);
    913			if (error < 0) {
    914				ctx->asoc->base.sk->sk_err = -error;
    915				return;
    916			}
    917			ctx->asoc->stats.octrlchunks++;
    918			break;
    919
    920		case SCTP_CID_ABORT:
    921			if (sctp_test_T_bit(chunk))
    922				ctx->packet->vtag = ctx->asoc->c.my_vtag;
    923			fallthrough;
    924
    925		/* The following chunks are "response" chunks, i.e.
    926		 * they are generated in response to something we
    927		 * received.  If we are sending these, then we can
    928		 * send only 1 packet containing these chunks.
    929		 */
    930		case SCTP_CID_HEARTBEAT_ACK:
    931		case SCTP_CID_SHUTDOWN_ACK:
    932		case SCTP_CID_COOKIE_ACK:
    933		case SCTP_CID_COOKIE_ECHO:
    934		case SCTP_CID_ERROR:
    935		case SCTP_CID_ECN_CWR:
    936		case SCTP_CID_ASCONF_ACK:
    937			one_packet = 1;
    938			fallthrough;
    939
    940		case SCTP_CID_HEARTBEAT:
    941			if (chunk->pmtu_probe) {
    942				error = sctp_packet_singleton(ctx->transport,
    943							      chunk, ctx->gfp);
    944				if (!error)
    945					ctx->asoc->stats.octrlchunks++;
    946				break;
    947			}
    948			fallthrough;
    949		case SCTP_CID_SACK:
    950		case SCTP_CID_SHUTDOWN:
    951		case SCTP_CID_ECN_ECNE:
    952		case SCTP_CID_ASCONF:
    953		case SCTP_CID_FWD_TSN:
    954		case SCTP_CID_I_FWD_TSN:
    955		case SCTP_CID_RECONF:
    956			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
    957							    one_packet, ctx->gfp);
    958			if (status != SCTP_XMIT_OK) {
    959				/* put the chunk back */
    960				list_add(&chunk->list, &ctx->q->control_chunk_list);
    961				break;
    962			}
    963
    964			ctx->asoc->stats.octrlchunks++;
    965			/* PR-SCTP C5) If a FORWARD TSN is sent, the
    966			 * sender MUST assure that at least one T3-rtx
    967			 * timer is running.
    968			 */
    969			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
    970			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
    971				sctp_transport_reset_t3_rtx(ctx->transport);
    972				ctx->transport->last_time_sent = jiffies;
    973			}
    974
    975			if (chunk == ctx->asoc->strreset_chunk)
    976				sctp_transport_reset_reconf_timer(ctx->transport);
    977
    978			break;
    979
    980		default:
    981			/* We built a chunk with an illegal type! */
    982			BUG();
    983		}
    984	}
    985}
    986
    987/* Returns false if new data shouldn't be sent */
    988static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
    989				int rtx_timeout)
    990{
    991	int error, start_timer = 0;
    992
    993	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
    994		return false;
    995
    996	if (ctx->transport != ctx->asoc->peer.retran_path) {
    997		/* Switch transports & prepare the packet.  */
    998		ctx->transport = ctx->asoc->peer.retran_path;
    999		ctx->packet = &ctx->transport->packet;
   1000
   1001		if (list_empty(&ctx->transport->send_ready))
   1002			list_add_tail(&ctx->transport->send_ready,
   1003				      &ctx->transport_list);
   1004
   1005		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
   1006				   ctx->asoc->peer.ecn_capable);
   1007	}
   1008
   1009	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
   1010				      &start_timer, ctx->gfp);
   1011	if (error < 0)
   1012		ctx->asoc->base.sk->sk_err = -error;
   1013
   1014	if (start_timer) {
   1015		sctp_transport_reset_t3_rtx(ctx->transport);
   1016		ctx->transport->last_time_sent = jiffies;
   1017	}
   1018
   1019	/* This can happen on COOKIE-ECHO resend.  Only
   1020	 * one chunk can get bundled with a COOKIE-ECHO.
   1021	 */
   1022	if (ctx->packet->has_cookie_echo)
   1023		return false;
   1024
   1025	/* Don't send new data if there is still data
   1026	 * waiting to retransmit.
   1027	 */
   1028	if (!list_empty(&ctx->q->retransmit))
   1029		return false;
   1030
   1031	return true;
   1032}
   1033
   1034static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
   1035				 int rtx_timeout)
   1036{
   1037	struct sctp_chunk *chunk;
   1038	enum sctp_xmit status;
   1039
   1040	/* Is it OK to send data chunks?  */
   1041	switch (ctx->asoc->state) {
   1042	case SCTP_STATE_COOKIE_ECHOED:
   1043		/* Only allow bundling when this packet has a COOKIE-ECHO
   1044		 * chunk.
   1045		 */
   1046		if (!ctx->packet || !ctx->packet->has_cookie_echo)
   1047			return;
   1048
   1049		fallthrough;
   1050	case SCTP_STATE_ESTABLISHED:
   1051	case SCTP_STATE_SHUTDOWN_PENDING:
   1052	case SCTP_STATE_SHUTDOWN_RECEIVED:
   1053		break;
   1054
   1055	default:
   1056		/* Do nothing. */
   1057		return;
   1058	}
   1059
   1060	/* RFC 2960 6.1  Transmission of DATA Chunks
   1061	 *
   1062	 * C) When the time comes for the sender to transmit,
   1063	 * before sending new DATA chunks, the sender MUST
   1064	 * first transmit any outstanding DATA chunks which
   1065	 * are marked for retransmission (limited by the
   1066	 * current cwnd).
   1067	 */
   1068	if (!list_empty(&ctx->q->retransmit) &&
   1069	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
   1070		return;
   1071
   1072	/* Apply Max.Burst limitation to the current transport in
   1073	 * case it will be used for new data.  We are going to
   1074	 * reset it before we return, but we want to apply the limit
   1075	 * to the currently queued data.
   1076	 */
   1077	if (ctx->transport)
   1078		sctp_transport_burst_limited(ctx->transport);
   1079
   1080	/* Finally, transmit new packets.  */
   1081	while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
   1082		__u32 sid = ntohs(chunk->subh.data_hdr->stream);
   1083		__u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state;
   1084
   1085		/* Has this chunk expired? */
   1086		if (sctp_chunk_abandoned(chunk)) {
   1087			sctp_sched_dequeue_done(ctx->q, chunk);
   1088			sctp_chunk_fail(chunk, 0);
   1089			sctp_chunk_free(chunk);
   1090			continue;
   1091		}
   1092
   1093		if (stream_state == SCTP_STREAM_CLOSED) {
   1094			sctp_outq_head_data(ctx->q, chunk);
   1095			break;
   1096		}
   1097
   1098		sctp_outq_select_transport(ctx, chunk);
   1099
   1100		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n",
   1101			 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
   1102			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
   1103			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
   1104			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
   1105			 refcount_read(&chunk->skb->users) : -1);
   1106
   1107		/* Add the chunk to the packet.  */
   1108		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
   1109						    ctx->gfp);
   1110		if (status != SCTP_XMIT_OK) {
   1111			/* We could not append this chunk, so put
   1112			 * the chunk back on the output queue.
   1113			 */
   1114			pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
   1115				 __func__, ntohl(chunk->subh.data_hdr->tsn),
   1116				 status);
   1117
   1118			sctp_outq_head_data(ctx->q, chunk);
   1119			break;
   1120		}
   1121
   1122		/* If the sender is in the SHUTDOWN-PENDING state, it MAY
   1123		 * set the I-bit in the DATA
   1124		 * chunk header.
   1125		 */
   1126		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
   1127			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
   1128		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
   1129			ctx->asoc->stats.ouodchunks++;
   1130		else
   1131			ctx->asoc->stats.oodchunks++;
   1132
   1133		/* Only now it's safe to consider this
   1134		 * chunk as sent, sched-wise.
   1135		 */
   1136		sctp_sched_dequeue_done(ctx->q, chunk);
   1137
   1138		list_add_tail(&chunk->transmitted_list,
   1139			      &ctx->transport->transmitted);
   1140
   1141		sctp_transport_reset_t3_rtx(ctx->transport);
   1142		ctx->transport->last_time_sent = jiffies;
   1143
   1144		/* Only let one DATA chunk get bundled with a
   1145		 * COOKIE-ECHO chunk.
   1146		 */
   1147		if (ctx->packet->has_cookie_echo)
   1148			break;
   1149	}
   1150}
   1151
   1152static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
   1153{
   1154	struct sock *sk = ctx->asoc->base.sk;
   1155	struct list_head *ltransport;
   1156	struct sctp_packet *packet;
   1157	struct sctp_transport *t;
   1158	int error = 0;
   1159
   1160	while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) {
   1161		t = list_entry(ltransport, struct sctp_transport, send_ready);
   1162		packet = &t->packet;
   1163		if (!sctp_packet_empty(packet)) {
   1164			rcu_read_lock();
   1165			if (t->dst && __sk_dst_get(sk) != t->dst) {
   1166				dst_hold(t->dst);
   1167				sk_setup_caps(sk, t->dst);
   1168			}
   1169			rcu_read_unlock();
   1170			error = sctp_packet_transmit(packet, ctx->gfp);
   1171			if (error < 0)
   1172				ctx->q->asoc->base.sk->sk_err = -error;
   1173		}
   1174
   1175		/* Clear the burst limited state, if any */
   1176		sctp_transport_burst_reset(t);
   1177	}
   1178}
   1179
   1180/* Try to flush an outqueue.
   1181 *
   1182 * Description: Send everything in q which we legally can, subject to
   1183 * congestion limitations.
   1184 * Note: This function can be called from multiple contexts, so appropriate
   1185 * locking must be taken into account.  Today we use the sock lock to protect
   1186 * this function.
   1187 */
   1188
   1189static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
   1190{
   1191	struct sctp_flush_ctx ctx = {
   1192		.q = q,
   1193		.transport = NULL,
   1194		.transport_list = LIST_HEAD_INIT(ctx.transport_list),
   1195		.asoc = q->asoc,
   1196		.packet = NULL,
   1197		.gfp = gfp,
   1198	};
   1199
   1200	/* 6.10 Bundling
   1201	 *   ...
   1202	 *   When bundling control chunks with DATA chunks, an
   1203	 *   endpoint MUST place control chunks first in the outbound
   1204	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
   1205	 *   within a SCTP packet in increasing order of TSN.
   1206	 *   ...
   1207	 */
   1208
   1209	sctp_outq_flush_ctrl(&ctx);
   1210
   1211	if (q->asoc->src_out_of_asoc_ok)
   1212		goto sctp_flush_out;
   1213
   1214	sctp_outq_flush_data(&ctx, rtx_timeout);
   1215
   1216sctp_flush_out:
   1217
   1218	sctp_outq_flush_transports(&ctx);
   1219}
   1220
   1221/* Update unack_data based on the incoming SACK chunk */
   1222static void sctp_sack_update_unack_data(struct sctp_association *assoc,
   1223					struct sctp_sackhdr *sack)
   1224{
   1225	union sctp_sack_variable *frags;
   1226	__u16 unack_data;
   1227	int i;
   1228
   1229	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
   1230
   1231	frags = sack->variable;
   1232	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
   1233		unack_data -= ((ntohs(frags[i].gab.end) -
   1234				ntohs(frags[i].gab.start) + 1));
   1235	}
   1236
   1237	assoc->unack_data = unack_data;
   1238}
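       /* For illustration: with next_tsn == 1010 and ctsn_ack_point == 999,
        * TSNs 1000..1009 are unacknowledged and unack_data starts at
        * 1010 - 999 - 1 = 10.  A gap ack block with start == 4 and end == 6
        * (offsets from the cumulative TSN ack) covers TSNs 1003..1005 and
        * subtracts 6 - 4 + 1 = 3, leaving unack_data == 7.
        */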
   1239
   1240/* This is where we REALLY process a SACK.
   1241 *
   1242 * Process the SACK against the outqueue.  Mostly, this just frees
   1243 * things off the transmitted queue.
   1244 */
   1245int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
   1246{
   1247	struct sctp_association *asoc = q->asoc;
   1248	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
   1249	struct sctp_transport *transport;
   1250	struct sctp_chunk *tchunk = NULL;
   1251	struct list_head *lchunk, *transport_list, *temp;
   1252	union sctp_sack_variable *frags = sack->variable;
   1253	__u32 sack_ctsn, ctsn, tsn;
   1254	__u32 highest_tsn, highest_new_tsn;
   1255	__u32 sack_a_rwnd;
   1256	unsigned int outstanding;
   1257	struct sctp_transport *primary = asoc->peer.primary_path;
   1258	int count_of_newacks = 0;
   1259	int gap_ack_blocks;
   1260	u8 accum_moved = 0;
   1261
   1262	/* Grab the association's destination address list. */
   1263	transport_list = &asoc->peer.transport_addr_list;
   1264
   1265	/* SCTP path tracepoint for congestion control debugging. */
   1266	if (trace_sctp_probe_path_enabled()) {
   1267		list_for_each_entry(transport, transport_list, transports)
   1268			trace_sctp_probe_path(transport, asoc);
   1269	}
   1270
   1271	sack_ctsn = ntohl(sack->cum_tsn_ack);
   1272	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
   1273	asoc->stats.gapcnt += gap_ack_blocks;
   1274	/*
   1275	 * SFR-CACC algorithm:
   1276	 * On receipt of a SACK the sender SHOULD execute the
   1277	 * following statements.
   1278	 *
   1279	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
   1280	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
   1281	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
   1282	 * all destinations.
   1283	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
   1284	 * is set the receiver of the SACK MUST take the following actions:
   1285	 *
   1286	 * A) Initialize the cacc_saw_newack to 0 for all destination
   1287	 * addresses.
   1288	 *
   1289	 * Only bother if changeover_active is set. Otherwise, this is
   1290	 * totally suboptimal to do on every SACK.
   1291	 */
   1292	if (primary->cacc.changeover_active) {
   1293		u8 clear_cycling = 0;
   1294
   1295		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
   1296			primary->cacc.changeover_active = 0;
   1297			clear_cycling = 1;
   1298		}
   1299
   1300		if (clear_cycling || gap_ack_blocks) {
   1301			list_for_each_entry(transport, transport_list,
   1302					transports) {
   1303				if (clear_cycling)
   1304					transport->cacc.cycling_changeover = 0;
   1305				if (gap_ack_blocks)
   1306					transport->cacc.cacc_saw_newack = 0;
   1307			}
   1308		}
   1309	}
   1310
   1311	/* Get the highest TSN in the sack. */
   1312	highest_tsn = sack_ctsn;
   1313	if (gap_ack_blocks)
   1314		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);
   1315
   1316	if (TSN_lt(asoc->highest_sacked, highest_tsn))
   1317		asoc->highest_sacked = highest_tsn;
   1318
   1319	highest_new_tsn = sack_ctsn;
   1320
   1321	/* Run through the retransmit queue.  Credit bytes received
   1322	 * and free those chunks that we can.
   1323	 */
   1324	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);
   1325
   1326	/* Run through the transmitted queue.
   1327	 * Credit bytes received and free those chunks which we can.
   1328	 *
   1329	 * This is a MASSIVE candidate for optimization.
   1330	 */
   1331	list_for_each_entry(transport, transport_list, transports) {
   1332		sctp_check_transmitted(q, &transport->transmitted,
   1333				       transport, &chunk->source, sack,
   1334				       &highest_new_tsn);
   1335		/*
   1336		 * SFR-CACC algorithm:
   1337		 * C) Let count_of_newacks be the number of
   1338		 * destinations for which cacc_saw_newack is set.
   1339		 */
   1340		if (transport->cacc.cacc_saw_newack)
   1341			count_of_newacks++;
   1342	}
   1343
   1344	/* Move the Cumulative TSN Ack Point if appropriate.  */
   1345	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
   1346		asoc->ctsn_ack_point = sack_ctsn;
   1347		accum_moved = 1;
   1348	}
   1349
   1350	if (gap_ack_blocks) {
   1351
   1352		if (asoc->fast_recovery && accum_moved)
   1353			highest_new_tsn = highest_tsn;
   1354
   1355		list_for_each_entry(transport, transport_list, transports)
   1356			sctp_mark_missing(q, &transport->transmitted, transport,
   1357					  highest_new_tsn, count_of_newacks);
   1358	}
   1359
   1360	/* Update unack_data field in the assoc. */
   1361	sctp_sack_update_unack_data(asoc, sack);
   1362
   1363	ctsn = asoc->ctsn_ack_point;
   1364
   1365	/* Throw away stuff rotting on the sack queue.  */
   1366	list_for_each_safe(lchunk, temp, &q->sacked) {
   1367		tchunk = list_entry(lchunk, struct sctp_chunk,
   1368				    transmitted_list);
   1369		tsn = ntohl(tchunk->subh.data_hdr->tsn);
   1370		if (TSN_lte(tsn, ctsn)) {
   1371			list_del_init(&tchunk->transmitted_list);
   1372			if (asoc->peer.prsctp_capable &&
   1373			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
   1374				asoc->sent_cnt_removable--;
   1375			sctp_chunk_free(tchunk);
   1376		}
   1377	}
   1378
   1379	/* ii) Set rwnd equal to the newly received a_rwnd minus the
   1380	 *     number of bytes still outstanding after processing the
   1381	 *     Cumulative TSN Ack and the Gap Ack Blocks.
   1382	 */
   1383
   1384	sack_a_rwnd = ntohl(sack->a_rwnd);
   1385	asoc->peer.zero_window_announced = !sack_a_rwnd;
   1386	outstanding = q->outstanding_bytes;
   1387
   1388	if (outstanding < sack_a_rwnd)
   1389		sack_a_rwnd -= outstanding;
   1390	else
   1391		sack_a_rwnd = 0;
   1392
   1393	asoc->peer.rwnd = sack_a_rwnd;
   1394
   1395	asoc->stream.si->generate_ftsn(q, sack_ctsn);
   1396
   1397	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
   1398	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
   1399		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
   1400		 asoc->adv_peer_ack_point);
   1401
   1402	return sctp_outq_is_empty(q);
   1403}
   1404
   1405/* Is the outqueue empty?
   1406 * The queue is empty when there is no pending data, no in-flight data,
   1407 * and no pending retransmissions.
   1408 */
   1409int sctp_outq_is_empty(const struct sctp_outq *q)
   1410{
   1411	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
   1412	       list_empty(&q->retransmit);
   1413}
   1414
   1415/********************************************************************
   1416 * 2nd Level Abstractions
   1417 ********************************************************************/
   1418
   1419/* Go through a transport's transmitted list or the association's retransmit
   1420 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
   1421 * The retransmit list will not have an associated transport.
   1422 *
   1423 * I added coherent debug information output.	--xguo
   1424 *
   1425 * Instead of printing 'sacked' or 'kept' for each TSN on the
   1426 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
   1427 * KEPT TSN6-TSN7, etc.
   1428 */
   1429static void sctp_check_transmitted(struct sctp_outq *q,
   1430				   struct list_head *transmitted_queue,
   1431				   struct sctp_transport *transport,
   1432				   union sctp_addr *saddr,
   1433				   struct sctp_sackhdr *sack,
   1434				   __u32 *highest_new_tsn_in_sack)
   1435{
   1436	struct list_head *lchunk;
   1437	struct sctp_chunk *tchunk;
   1438	struct list_head tlist;
   1439	__u32 tsn;
   1440	__u32 sack_ctsn;
   1441	__u32 rtt;
   1442	__u8 restart_timer = 0;
   1443	int bytes_acked = 0;
   1444	int migrate_bytes = 0;
   1445	bool forward_progress = false;
   1446
   1447	sack_ctsn = ntohl(sack->cum_tsn_ack);
   1448
   1449	INIT_LIST_HEAD(&tlist);
   1450
   1451	/* The while loop will skip empty transmitted queues. */
   1452	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
   1453		tchunk = list_entry(lchunk, struct sctp_chunk,
   1454				    transmitted_list);
   1455
   1456		if (sctp_chunk_abandoned(tchunk)) {
   1457			/* Move the chunk to abandoned list. */
   1458			sctp_insert_list(&q->abandoned, lchunk);
   1459
   1460			/* If this chunk has not been acked, stop
   1461			 * considering it as 'outstanding'.
   1462			 */
   1463			if (transmitted_queue != &q->retransmit &&
   1464			    !tchunk->tsn_gap_acked) {
   1465				if (tchunk->transport)
   1466					tchunk->transport->flight_size -=
   1467							sctp_data_size(tchunk);
   1468				q->outstanding_bytes -= sctp_data_size(tchunk);
   1469			}
   1470			continue;
   1471		}
   1472
   1473		tsn = ntohl(tchunk->subh.data_hdr->tsn);
   1474		if (sctp_acked(sack, tsn)) {
   1475			/* If this queue is the retransmit queue, the
   1476			 * retransmit timer has already reclaimed
   1477			 * the outstanding bytes for this chunk, so only
   1478			 * count bytes associated with a transport.
   1479			 */
   1480			if (transport && !tchunk->tsn_gap_acked) {
   1481				/* If this chunk is being used for RTT
   1482				 * measurement, calculate the RTT and update
   1483				 * the RTO using this value.
   1484				 *
   1485				 * 6.3.1 C5) Karn's algorithm: RTT measurements
   1486				 * MUST NOT be made using packets that were
   1487				 * retransmitted (and thus for which it is
   1488				 * ambiguous whether the reply was for the
   1489				 * first instance of the packet or a later
   1490				 * instance).
   1491				 */
   1492				if (!sctp_chunk_retransmitted(tchunk) &&
   1493				    tchunk->rtt_in_progress) {
   1494					tchunk->rtt_in_progress = 0;
   1495					rtt = jiffies - tchunk->sent_at;
   1496					sctp_transport_update_rto(transport,
   1497								  rtt);
   1498				}
   1499
   1500				if (TSN_lte(tsn, sack_ctsn)) {
   1501					/*
   1502					 * SFR-CACC algorithm:
   1503					 * 2) If the SACK contains gap acks
   1504					 * and the flag CHANGEOVER_ACTIVE is
   1505					 * set the receiver of the SACK MUST
   1506					 * take the following action:
   1507					 *
   1508					 * B) For each TSN t being acked that
   1509					 * has not been acked in any SACK so
   1510					 * far, set cacc_saw_newack to 1 for
   1511					 * the destination that the TSN was
   1512					 * sent to.
   1513					 */
   1514					if (sack->num_gap_ack_blocks &&
   1515					    q->asoc->peer.primary_path->cacc.
   1516					    changeover_active)
   1517						transport->cacc.cacc_saw_newack
   1518							= 1;
   1519				}
   1520			}
   1521
   1522			/* If the chunk hasn't been marked as ACKED,
   1523			 * mark it and account bytes_acked if the
   1524			 * chunk had a valid transport (it will not
   1525			 * have a transport if ASCONF had deleted it
   1526			 * while DATA was outstanding).
   1527			 */
   1528			if (!tchunk->tsn_gap_acked) {
   1529				tchunk->tsn_gap_acked = 1;
   1530				if (TSN_lt(*highest_new_tsn_in_sack, tsn))
   1531					*highest_new_tsn_in_sack = tsn;
   1532				bytes_acked += sctp_data_size(tchunk);
   1533				if (!tchunk->transport)
   1534					migrate_bytes += sctp_data_size(tchunk);
   1535				forward_progress = true;
   1536			}
   1537
   1538			if (TSN_lte(tsn, sack_ctsn)) {
   1539				/* RFC 2960  6.3.2 Retransmission Timer Rules
   1540				 *
   1541				 * R3) Whenever a SACK is received
   1542				 * that acknowledges the DATA chunk
   1543				 * with the earliest outstanding TSN
   1544				 * for that address, restart T3-rtx
   1545				 * timer for that address with its
   1546				 * current RTO.
   1547				 */
   1548				restart_timer = 1;
   1549				forward_progress = true;
   1550
   1551				list_add_tail(&tchunk->transmitted_list,
   1552					      &q->sacked);
   1553			} else {
   1554				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
   1555				 * M2) Each time a SACK arrives reporting
   1556				 * 'Stray DATA chunk(s)' record the highest TSN
   1557				 * reported as newly acknowledged, call this
   1558				 * value 'HighestTSNinSack'. A newly
   1559				 * acknowledged DATA chunk is one not
   1560				 * previously acknowledged in a SACK.
   1561				 *
   1562				 * When the SCTP sender of data receives a SACK
   1563				 * chunk that acknowledges, for the first time,
   1564				 * the receipt of a DATA chunk, all the still
   1565				 * unacknowledged DATA chunks whose TSN is
   1566				 * older than that newly acknowledged DATA
   1567				 * chunk, are qualified as 'Stray DATA chunks'.
   1568				 */
   1569				list_add_tail(lchunk, &tlist);
   1570			}
   1571		} else {
   1572			if (tchunk->tsn_gap_acked) {
   1573				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
   1574					 __func__, tsn);
   1575
   1576				tchunk->tsn_gap_acked = 0;
   1577
   1578				if (tchunk->transport)
   1579					bytes_acked -= sctp_data_size(tchunk);
   1580
   1581				/* RFC 2960 6.3.2 Retransmission Timer Rules
   1582				 *
   1583				 * R4) Whenever a SACK is received missing a
   1584				 * TSN that was previously acknowledged via a
   1585				 * Gap Ack Block, start T3-rtx for the
   1586				 * destination address to which the DATA
   1587				 * chunk was originally
   1588				 * transmitted if it is not already running.
   1589				 */
   1590				restart_timer = 1;
   1591			}
   1592
   1593			list_add_tail(lchunk, &tlist);
   1594		}
   1595	}
   1596
   1597	if (transport) {
   1598		if (bytes_acked) {
   1599			struct sctp_association *asoc = transport->asoc;
   1600
   1601			/* We may have counted DATA that was migrated
   1602			 * to this transport due to DEL-IP operation.
    1603			 * Subtract those bytes, since they were never
    1604			 * sent on this transport and shouldn't be
   1605			 * credited to this transport.
   1606			 */
   1607			bytes_acked -= migrate_bytes;
   1608
   1609			/* 8.2. When an outstanding TSN is acknowledged,
   1610			 * the endpoint shall clear the error counter of
   1611			 * the destination transport address to which the
   1612			 * DATA chunk was last sent.
   1613			 * The association's overall error counter is
   1614			 * also cleared.
   1615			 */
   1616			transport->error_count = 0;
   1617			transport->asoc->overall_error_count = 0;
   1618			forward_progress = true;
   1619
   1620			/*
   1621			 * While in SHUTDOWN PENDING, we may have started
   1622			 * the T5 shutdown guard timer after reaching the
   1623			 * retransmission limit. Stop that timer as soon
   1624			 * as the receiver acknowledged any data.
   1625			 */
   1626			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
   1627			    del_timer(&asoc->timers
   1628				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
   1629					sctp_association_put(asoc);
   1630
   1631			/* Mark the destination transport address as
   1632			 * active if it is not so marked.
   1633			 */
   1634			if ((transport->state == SCTP_INACTIVE ||
   1635			     transport->state == SCTP_UNCONFIRMED) &&
   1636			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
   1637				sctp_assoc_control_transport(
   1638					transport->asoc,
   1639					transport,
   1640					SCTP_TRANSPORT_UP,
   1641					SCTP_RECEIVED_SACK);
   1642			}
   1643
   1644			sctp_transport_raise_cwnd(transport, sack_ctsn,
   1645						  bytes_acked);
   1646
   1647			transport->flight_size -= bytes_acked;
   1648			if (transport->flight_size == 0)
   1649				transport->partial_bytes_acked = 0;
   1650			q->outstanding_bytes -= bytes_acked + migrate_bytes;
   1651		} else {
   1652			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
   1653			 * When a sender is doing zero window probing, it
   1654			 * should not timeout the association if it continues
   1655			 * to receive new packets from the receiver. The
   1656			 * reason is that the receiver MAY keep its window
   1657			 * closed for an indefinite time.
   1658			 * A sender is doing zero window probing when the
   1659			 * receiver's advertised window is zero, and there is
   1660			 * only one data chunk in flight to the receiver.
   1661			 *
   1662			 * Allow the association to timeout while in SHUTDOWN
   1663			 * PENDING or SHUTDOWN RECEIVED in case the receiver
   1664			 * stays in zero window mode forever.
   1665			 */
   1666			if (!q->asoc->peer.rwnd &&
   1667			    !list_empty(&tlist) &&
   1668			    (sack_ctsn+2 == q->asoc->next_tsn) &&
   1669			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
   1670				pr_debug("%s: sack received for zero window "
   1671					 "probe:%u\n", __func__, sack_ctsn);
   1672
   1673				q->asoc->overall_error_count = 0;
   1674				transport->error_count = 0;
   1675			}
   1676		}
   1677
   1678		/* RFC 2960 6.3.2 Retransmission Timer Rules
   1679		 *
   1680		 * R2) Whenever all outstanding data sent to an address have
   1681		 * been acknowledged, turn off the T3-rtx timer of that
   1682		 * address.
   1683		 */
   1684		if (!transport->flight_size) {
   1685			if (del_timer(&transport->T3_rtx_timer))
   1686				sctp_transport_put(transport);
   1687		} else if (restart_timer) {
   1688			if (!mod_timer(&transport->T3_rtx_timer,
   1689				       jiffies + transport->rto))
   1690				sctp_transport_hold(transport);
   1691		}
   1692
   1693		if (forward_progress) {
   1694			if (transport->dst)
   1695				sctp_transport_dst_confirm(transport);
   1696		}
   1697	}
   1698
   1699	list_splice(&tlist, transmitted_queue);
   1700}
   1701
    1702	/* Mark chunks as missing; they may consequently get retransmitted. */
   1703static void sctp_mark_missing(struct sctp_outq *q,
   1704			      struct list_head *transmitted_queue,
   1705			      struct sctp_transport *transport,
   1706			      __u32 highest_new_tsn_in_sack,
   1707			      int count_of_newacks)
   1708{
   1709	struct sctp_chunk *chunk;
   1710	__u32 tsn;
   1711	char do_fast_retransmit = 0;
   1712	struct sctp_association *asoc = q->asoc;
   1713	struct sctp_transport *primary = asoc->peer.primary_path;
   1714
   1715	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
   1716
   1717		tsn = ntohl(chunk->subh.data_hdr->tsn);
   1718
   1719		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
   1720		 * 'Unacknowledged TSN's', if the TSN number of an
   1721		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
   1722		 * value, increment the 'TSN.Missing.Report' count on that
   1723		 * chunk if it has NOT been fast retransmitted or marked for
   1724		 * fast retransmit already.
   1725		 */
   1726		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
   1727		    !chunk->tsn_gap_acked &&
   1728		    TSN_lt(tsn, highest_new_tsn_in_sack)) {
   1729
   1730			/* SFR-CACC may require us to skip marking
   1731			 * this chunk as missing.
   1732			 */
   1733			if (!transport || !sctp_cacc_skip(primary,
   1734						chunk->transport,
   1735						count_of_newacks, tsn)) {
   1736				chunk->tsn_missing_report++;
   1737
   1738				pr_debug("%s: tsn:0x%x missing counter:%d\n",
   1739					 __func__, tsn, chunk->tsn_missing_report);
   1740			}
   1741		}
   1742		/*
   1743		 * M4) If any DATA chunk is found to have a
   1744		 * 'TSN.Missing.Report'
   1745		 * value larger than or equal to 3, mark that chunk for
   1746		 * retransmission and start the fast retransmit procedure.
   1747		 */
   1748
   1749		if (chunk->tsn_missing_report >= 3) {
   1750			chunk->fast_retransmit = SCTP_NEED_FRTX;
   1751			do_fast_retransmit = 1;
   1752		}
   1753	}
   1754
   1755	if (transport) {
   1756		if (do_fast_retransmit)
   1757			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
   1758
   1759		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
   1760			 "flight_size:%d, pba:%d\n",  __func__, transport,
   1761			 transport->cwnd, transport->ssthresh,
   1762			 transport->flight_size, transport->partial_bytes_acked);
   1763	}
   1764}
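/* Example of the M3/M4 rules in sctp_mark_missing(): with
 * HighestTSNinSack == 108, an unacked TSN 105 that is still marked
 * SCTP_CAN_FRTX has tsn_missing_report incremented on each such SACK;
 * once the count reaches 3 the chunk is marked SCTP_NEED_FRTX and
 * sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX) is invoked.
 */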
   1765
   1766/* Is the given TSN acked by this packet?  */
   1767static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
   1768{
   1769	__u32 ctsn = ntohl(sack->cum_tsn_ack);
   1770	union sctp_sack_variable *frags;
   1771	__u16 tsn_offset, blocks;
   1772	int i;
   1773
   1774	if (TSN_lte(tsn, ctsn))
   1775		goto pass;
   1776
   1777	/* 3.3.4 Selective Acknowledgment (SACK) (3):
   1778	 *
   1779	 * Gap Ack Blocks:
   1780	 *  These fields contain the Gap Ack Blocks. They are repeated
   1781	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
   1782	 *  defined in the Number of Gap Ack Blocks field. All DATA
   1783	 *  chunks with TSNs greater than or equal to (Cumulative TSN
   1784	 *  Ack + Gap Ack Block Start) and less than or equal to
   1785	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
   1786	 *  Block are assumed to have been received correctly.
   1787	 */
   1788
   1789	frags = sack->variable;
   1790	blocks = ntohs(sack->num_gap_ack_blocks);
   1791	tsn_offset = tsn - ctsn;
   1792	for (i = 0; i < blocks; ++i) {
   1793		if (tsn_offset >= ntohs(frags[i].gab.start) &&
   1794		    tsn_offset <= ntohs(frags[i].gab.end))
   1795			goto pass;
   1796	}
   1797
   1798	return 0;
   1799pass:
   1800	return 1;
   1801}
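/* Worked example for sctp_acked(): with cum_tsn_ack == 100 and one Gap
 * Ack Block {start = 2, end = 4}, TSNs 102..104 are reported received;
 * for tsn == 103 the offset (tsn - ctsn) == 3 lies within [2, 4], so the
 * function returns 1.  TSN 105 gives offset 5, outside the block, so it
 * returns 0.
 */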
   1802
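/* Find the index of an existing skip entry for 'stream' in 'skiplist',
 * or return 'nskips' (one past the last used slot) when no entry exists
 * yet, so the caller can append a new one.
 */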
   1803static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
   1804				    int nskips, __be16 stream)
   1805{
   1806	int i;
   1807
   1808	for (i = 0; i < nskips; i++) {
   1809		if (skiplist[i].stream == stream)
   1810			return i;
   1811	}
   1812	return i;
   1813}
   1814
   1815/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
   1816void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
   1817{
   1818	struct sctp_association *asoc = q->asoc;
   1819	struct sctp_chunk *ftsn_chunk = NULL;
   1820	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
   1821	int nskips = 0;
   1822	int skip_pos = 0;
   1823	__u32 tsn;
   1824	struct sctp_chunk *chunk;
   1825	struct list_head *lchunk, *temp;
   1826
   1827	if (!asoc->peer.prsctp_capable)
   1828		return;
   1829
   1830	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
   1831	 * received SACK.
   1832	 *
   1833	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
   1834	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
   1835	 */
   1836	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
   1837		asoc->adv_peer_ack_point = ctsn;
   1838
   1839	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
   1840	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
   1841	 * the chunk next in the out-queue space is marked as "abandoned" as
   1842	 * shown in the following example:
   1843	 *
   1844	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
   1845	 * and the Advanced.Peer.Ack.Point is updated to this value:
   1846	 *
   1847	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
   1848	 *   normal SACK processing           local advancement
   1849	 *                ...                           ...
   1850	 *   Adv.Ack.Pt-> 102 acked                     102 acked
   1851	 *                103 abandoned                 103 abandoned
   1852	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
   1853	 *                105                           105
   1854	 *                106 acked                     106 acked
   1855	 *                ...                           ...
   1856	 *
   1857	 * In this example, the data sender successfully advanced the
   1858	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
   1859	 */
   1860	list_for_each_safe(lchunk, temp, &q->abandoned) {
   1861		chunk = list_entry(lchunk, struct sctp_chunk,
   1862					transmitted_list);
   1863		tsn = ntohl(chunk->subh.data_hdr->tsn);
   1864
   1865		/* Remove any chunks in the abandoned queue that are acked by
   1866		 * the ctsn.
   1867		 */
   1868		if (TSN_lte(tsn, ctsn)) {
   1869			list_del_init(lchunk);
   1870			sctp_chunk_free(chunk);
   1871		} else {
   1872			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
   1873				asoc->adv_peer_ack_point = tsn;
   1874				if (chunk->chunk_hdr->flags &
   1875					 SCTP_DATA_UNORDERED)
   1876					continue;
   1877				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
   1878						nskips,
   1879						chunk->subh.data_hdr->stream);
   1880				ftsn_skip_arr[skip_pos].stream =
   1881					chunk->subh.data_hdr->stream;
   1882				ftsn_skip_arr[skip_pos].ssn =
   1883					 chunk->subh.data_hdr->ssn;
   1884				if (skip_pos == nskips)
   1885					nskips++;
   1886				if (nskips == 10)
   1887					break;
   1888			} else
   1889				break;
   1890		}
   1891	}
   1892
   1893	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
   1894	 * is greater than the Cumulative TSN ACK carried in the received
   1895	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
   1896	 * chunk containing the latest value of the
   1897	 * "Advanced.Peer.Ack.Point".
   1898	 *
   1899	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
   1900	 * list each stream and sequence number in the forwarded TSN. This
   1901	 * information will enable the receiver to easily find any
   1902	 * stranded TSN's waiting on stream reorder queues. Each stream
   1903	 * SHOULD only be reported once; this means that if multiple
   1904	 * abandoned messages occur in the same stream then only the
   1905	 * highest abandoned stream sequence number is reported. If the
   1906	 * total size of the FORWARD TSN does NOT fit in a single MTU then
   1907	 * the sender of the FORWARD TSN SHOULD lower the
   1908	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
   1909	 * single MTU.
   1910	 */
   1911	if (asoc->adv_peer_ack_point > ctsn)
   1912		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
   1913					      nskips, &ftsn_skip_arr[0]);
   1914
   1915	if (ftsn_chunk) {
   1916		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
   1917		SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
   1918	}
   1919}
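/* Continuing the Adv.Ack.Point example in the comments above: with
 * ctsn == 102 and Advanced.Peer.Ack.Point advanced to 104, a FORWARD TSN
 * chunk carrying new cumulative TSN 104 (plus stream/SSN pairs for the
 * ordered abandoned chunks) is queued on the control chunk list and
 * counted in SCTP_MIB_OUTCTRLCHUNKS.
 */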