cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

stream.c (5462B)


// SPDX-License-Identifier: GPL-2.0
/*
 *     SUCS NET3:
 *
 *     Generic stream handling routines. These are generic for most
 *     protocols. Even IP. Tonight 8-).
 *     This is used because TCP, LLC (others too) layer all have mostly
 *     identical sendmsg() and recvmsg() code.
 *     So we (will) share it here.
 *
 *     Authors:        Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *                     (from old tcp.c code)
 *                     Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-))
 */

#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/net.h>
#include <linux/signal.h>
#include <linux/tcp.h>
#include <linux/wait.h>
#include <net/sock.h>

/**
 * sk_stream_write_space - stream socket write_space callback.
 * @sk: socket
 *
 * Called when send buffer space becomes available on a stream socket:
 * clears SOCK_NOSPACE and wakes up tasks sleeping in poll() or blocked
 * waiting for the socket to become writeable again.
 */
void sk_stream_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct socket_wq *wq;

	if (__sk_stream_is_writeable(sk, 1) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
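
/*
 * Usage sketch: protocols that rely on these generic stream helpers
 * install this function as the socket's write_space callback when the
 * socket is set up (TCP does so in its init path), roughly:
 *
 *	sk->sk_write_space = sk_stream_write_space;
 *
 * The transmit path then invokes sk->sk_write_space(sk) whenever queued
 * data is freed, so that writers blocked in sk_stream_wait_memory() or
 * polling for EPOLLOUT get woken once enough space is available again.
 */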

/**
 * sk_stream_wait_connect - Wait for a socket to get into the connected state
 * @sk: sock to wait on
 * @timeo_p: for how long to wait
 *
 * Must be called with the socket locked.
 */
int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct task_struct *tsk = current;
	int done;

	do {
		int err = sock_error(sk);
		if (err)
			return err;
		if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
			return -EPIPE;
		if (!*timeo_p)
			return -EAGAIN;
		if (signal_pending(tsk))
			return sock_intr_errno(*timeo_p);

		add_wait_queue(sk_sleep(sk), &wait);
		sk->sk_write_pending++;
		done = sk_wait_event(sk, timeo_p,
				     !sk->sk_err &&
				     !((1 << sk->sk_state) &
				       ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)), &wait);
		remove_wait_queue(sk_sleep(sk), &wait);
		sk->sk_write_pending--;
	} while (!done);
	return 0;
}
EXPORT_SYMBOL(sk_stream_wait_connect);
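
/*
 * Usage sketch (modelled on the TCP sendmsg path): before queueing data,
 * a stream protocol waits for the connection to be established, honouring
 * the socket's send timeout:
 *
 *	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 *
 *	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
 *		err = sk_stream_wait_connect(sk, &timeo);
 *		if (err != 0)
 *			goto out_err;
 *	}
 *
 * On success the remaining budget is written back through timeo, so the
 * same timeout carries over to a later sk_stream_wait_memory() call.
 */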
     85
     86/**
     87 * sk_stream_closing - Return 1 if we still have things to send in our buffers.
     88 * @sk: socket to verify
     89 */
     90static inline int sk_stream_closing(struct sock *sk)
     91{
     92	return (1 << sk->sk_state) &
     93	       (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK);
     94}
     95
     96void sk_stream_wait_close(struct sock *sk, long timeout)
     97{
     98	if (timeout) {
     99		DEFINE_WAIT_FUNC(wait, woken_wake_function);
    100
    101		add_wait_queue(sk_sleep(sk), &wait);
    102
    103		do {
    104			if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk), &wait))
    105				break;
    106		} while (!signal_pending(current) && timeout);
    107
    108		remove_wait_queue(sk_sleep(sk), &wait);
    109	}
    110}
    111EXPORT_SYMBOL(sk_stream_wait_close);
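
/*
 * Usage sketch: the timeout normally comes from SO_LINGER. For TCP the
 * socket release path picks it up roughly like this before calling the
 * protocol's close routine:
 *
 *	timeout = 0;
 *	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
 *		timeout = sk->sk_lingertime;
 *	sk->sk_prot->close(sk, timeout);
 *
 * and the close routine then calls sk_stream_wait_close(sk, timeout)
 * under the socket lock, so a lingering close blocks until the remaining
 * data is sent or the linger time expires.
 */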

/**
 * sk_stream_wait_memory - Wait for more memory for a socket
 * @sk: socket to wait for memory
 * @timeo_p: for how long
 */
int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
{
	int err = 0;
	long vm_wait = 0;
	long current_timeo = *timeo_p;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	if (sk_stream_memory_free(sk))
		current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2;

	add_wait_queue(sk_sleep(sk), &wait);

	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo_p)
			goto do_eagain;
		if (signal_pending(current))
			goto do_interrupted;
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk_stream_memory_free(sk) && !vm_wait)
			break;

		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_pending++;
		sk_wait_event(sk, &current_timeo, sk->sk_err ||
						  (sk->sk_shutdown & SEND_SHUTDOWN) ||
						  (sk_stream_memory_free(sk) &&
						  !vm_wait), &wait);
		sk->sk_write_pending--;

		if (vm_wait) {
			vm_wait -= current_timeo;
			current_timeo = *timeo_p;
			if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
			    (current_timeo -= vm_wait) < 0)
				current_timeo = 0;
			vm_wait = 0;
		}
		*timeo_p = current_timeo;
	}
out:
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;

do_error:
	err = -EPIPE;
	goto out;
do_eagain:
	/* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
	 * be generated later.
	 * When TCP receives ACK packets that make room, tcp_check_space()
	 * only calls tcp_new_space() if SOCK_NOSPACE is set.
	 */
	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	err = -EAGAIN;
	goto out;
do_interrupted:
	err = sock_intr_errno(*timeo_p);
	goto out;
}
EXPORT_SYMBOL(sk_stream_wait_memory);
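
/*
 * Usage sketch (illustrative; the labels and variables follow the shape
 * of stream sendmsg implementations such as TCP's): when no send buffer
 * space is left, the sender marks SOCK_NOSPACE, pushes anything already
 * queued, and sleeps here until sk_stream_write_space() wakes it or the
 * timeout runs out:
 *
 *		if (!sk_stream_memory_free(sk))
 *			goto wait_for_space;
 *		...
 *	wait_for_space:
 *		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 *		err = sk_stream_wait_memory(sk, &timeo);
 *		if (err != 0)
 *			goto do_error;
 *
 * On a non-blocking socket timeo is 0, so the wait returns -EAGAIN
 * immediately while leaving SOCK_NOSPACE set (see do_eagain above).
 */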

int sk_stream_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}
EXPORT_SYMBOL(sk_stream_error);
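
/*
 * Usage sketch: sendmsg error paths typically funnel their exit through
 * this helper, so that a broken pipe raises SIGPIPE unless the caller
 * passed MSG_NOSIGNAL (labels and the copied variable are illustrative,
 * following the TCP sendmsg pattern):
 *
 *	do_error:
 *		if (copied)
 *			goto out;
 *	out_err:
 *		err = sk_stream_error(sk, msg->msg_flags, err);
 *		return err;
 */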

void sk_stream_kill_queues(struct sock *sk)
{
	/* First the read buffer. */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* Next, the write queue. */
	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));

	/* Account for returned memory. */
	sk_mem_reclaim_final(sk);

	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);

	/* It is _impossible_ for the backlog to contain anything
	 * when we get here.  All user references to this socket
	 * have gone away; only the net layer can touch it.
	 */
}
EXPORT_SYMBOL(sk_stream_kill_queues);
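
/*
 * Usage sketch: this runs late in socket destruction, once no user
 * references remain. For connection-oriented inet sockets the destroy
 * path calls it right after the protocol's own ->destroy() hook,
 * abridged from inet_csk_destroy_sock():
 *
 *	sk->sk_prot->destroy(sk);
 *	sk_stream_kill_queues(sk);
 *	xfrm_sk_free_policy(sk);
 *	sock_put(sk);
 *
 * (Exact steps vary between kernel versions.)
 */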