cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

x25_out.c (5238B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	History
 *	X.25 001	Jonathan Naylor	Started coding.
 *	X.25 002	Jonathan Naylor	New timer architecture.
 *	2000-09-04	Henner Eisen	Prevented x25_output() skb leakage.
 *	2000-10-27	Henner Eisen	MSG_DONTWAIT for fragment allocation.
 *	2000-11-10	Henner Eisen	x25_send_iframe(): re-queued frames
 *					needed cleaned seq-number fields.
 */

#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/x25.h>

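/*
 * Convert the negotiated packet size facility value (a power-of-two
 * exponent, e.g. 7 for 128 bytes) into a byte count.  A value of zero
 * means no packet size was negotiated, so fall back to the X.25 default
 * of 128 bytes.
 */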
static int x25_pacsize_to_bytes(unsigned int pacsize)
{
	int bytes = 1;

	if (!pacsize)
		return 128;

	while (pacsize-- > 0)
		bytes *= 2;

	return bytes;
}

/*
 *	This is where all X.25 information frames pass.
 *
 *      Returns the amount of user data bytes sent on success
 *      or a negative error code on failure.
 */
int x25_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char header[X25_EXT_MIN_LEN];
	int err, frontlen, len;
	int sent=0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
	struct x25_sock *x25 = x25_sk(sk);
	int header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN :
						    X25_STD_MIN_LEN;
	int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out);

	if (skb->len - header_len > max_len) {
		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, header_len);
		skb_pull(skb, header_len);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
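			/*
			 * Drop the socket lock while allocating the next
			 * fragment so sock_alloc_send_skb() can sleep for
			 * buffer space (unless MSG_DONTWAIT was set) without
			 * blocking other users of the socket.
			 */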
			release_sock(sk);
			skbn = sock_alloc_send_skb(sk, frontlen + max_len,
						   noblock, &err);
			lock_sock(sk);
			if (!skbn) {
				if (err == -EWOULDBLOCK && noblock){
					kfree_skb(skb);
					return sent;
				}
				SOCK_DEBUG(sk, "x25_output: fragment alloc"
					       " failed, err=%d, %d bytes "
					       "sent\n", err, sent);
				return err;
			}

			skb_reserve(skbn, frontlen);

			len = max_len > skb->len ? skb->len : max_len;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			/* Duplicate the Header */
			skb_push(skbn, header_len);
			skb_copy_to_linear_data(skbn, header, header_len);

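			/*
			 * More user data remains in the original skb, so
			 * flag this fragment with the M (more data) bit;
			 * only the final fragment leaves it clear.
			 */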
			if (skb->len > 0) {
				if (x25->neighbour->extended)
					skbn->data[3] |= X25_EXT_M_BIT;
				else
					skbn->data[2] |= X25_STD_M_BIT;
			}

			skb_queue_tail(&sk->sk_write_queue, skbn);
			sent += len;
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);
		sent = skb->len - header_len;
	}
	return sent;
}

/*
 *	This procedure is passed a buffer descriptor for an iframe. It builds
 *	the rest of the control part of the frame and then writes it out.
 */
static void x25_send_iframe(struct sock *sk, struct sk_buff *skb)
{
	struct x25_sock *x25 = x25_sk(sk);

	if (!skb)
		return;

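	/*
	 * Fill in N(S) and N(R).  In extended (modulo 128) operation the
	 * sequence numbers occupy bytes 2 and 3 of the header; in standard
	 * (modulo 8) operation both are packed into byte 2 around the M bit.
	 */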
	if (x25->neighbour->extended) {
		skb->data[2]  = (x25->vs << 1) & 0xFE;
		skb->data[3] &= X25_EXT_M_BIT;
		skb->data[3] |= (x25->vr << 1) & 0xFE;
	} else {
		skb->data[2] &= X25_STD_M_BIT;
		skb->data[2] |= (x25->vs << 1) & 0x0E;
		skb->data[2] |= (x25->vr << 5) & 0xE0;
	}

	x25_transmit_link(skb, x25->neighbour);
}

void x25_kick(struct sock *sk)
{
	struct sk_buff *skb, *skbn;
	unsigned short start, end;
	int modulus;
	struct x25_sock *x25 = x25_sk(sk);

	if (x25->state != X25_STATE_3)
		return;

	/*
	 *	Transmit interrupt data.
	 */
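	/*
	 * X.25 permits only one unconfirmed interrupt packet at a time;
	 * X25_INTERRUPT_FLAG gates transmission here and is cleared again
	 * elsewhere once the peer confirms the interrupt.
	 */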
	if (skb_peek(&x25->interrupt_out_queue) != NULL &&
		!test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) {

		skb = skb_dequeue(&x25->interrupt_out_queue);
		x25_transmit_link(skb, x25->neighbour);
	}

	if (x25->condition & X25_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;

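	/*
	 * Work out how much of the send window is open: start from V(S) if
	 * frames are already awaiting acknowledgement, otherwise from V(A),
	 * and stop at V(A) plus the negotiated output window size.  If the
	 * two coincide, the window is closed and nothing can be sent yet.
	 */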
	modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

	start   = skb_peek(&x25->ack_queue) ? x25->vs : x25->va;
	end     = (x25->va + x25->facilities.winsize_out) % modulus;

	if (start == end)
		return;

	x25->vs = start;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full.
	 */

	skb = skb_dequeue(&sk->sk_write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		/*
		 * Transmit the frame copy.
		 */
		x25_send_iframe(sk, skbn);

		x25->vs = (x25->vs + 1) % modulus;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&x25->ack_queue, skb);

	} while (x25->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

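	/*
	 * Every I-frame sent above carried N(R) = V(R), so the peer has
	 * already been acknowledged up to V(R): record that in V(L), clear
	 * the pending-ACK condition and stop the timer.
	 */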
	x25->vl         = x25->vr;
	x25->condition &= ~X25_COND_ACK_PENDING;

	x25_stop_timer(sk);
}

/*
 * The following routines are taken from page 170 of the 7th ARRL Computer
 * Networking Conference paper, as is the whole state machine.
 */

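/*
 * Answer an enquiry from the peer with our current receive status: RNR
 * while our own receiver is busy, RR otherwise.  As in x25_kick(), the
 * pending acknowledgement state is then cleared and the timer stopped.
 */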
void x25_enquiry_response(struct sock *sk)
{
	struct x25_sock *x25 = x25_sk(sk);

	if (x25->condition & X25_COND_OWN_RX_BUSY)
		x25_write_internal(sk, X25_RNR);
	else
		x25_write_internal(sk, X25_RR);

	x25->vl         = x25->vr;
	x25->condition &= ~X25_COND_ACK_PENDING;

	x25_stop_timer(sk);
}