cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tcp_recv.c (9381B)


/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/tcp.h>

#include "rds.h"
#include "tcp.h"

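/* Slab cache backing struct rds_tcp_incoming allocations. */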
static struct kmem_cache *rds_tcp_incoming_slab;

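/* Drop any payload skbs still queued on an incoming message. */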
static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rdsdebug("purging tinc %p inc %p\n", tinc, inc);
	skb_queue_purge(&tinc->ti_skb_list);
}

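/* Free an incoming message: purge its skb list and return it to the slab. */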
void rds_tcp_inc_free(struct rds_incoming *inc)
{
	struct rds_tcp_incoming *tinc;
	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
	rds_tcp_inc_purge(inc);
	rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
	kmem_cache_free(rds_tcp_incoming_slab, tinc);
}

/*
 * Copy message payload to userspace, walking the queued skbs until the
 * iterator is full or we run out of data.
 */
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
	struct rds_tcp_incoming *tinc;
	struct sk_buff *skb;
	int ret = 0;

	if (!iov_iter_count(to))
		goto out;

	tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		unsigned long to_copy, skb_off;
		for (skb_off = 0; skb_off < skb->len; skb_off += to_copy) {
			to_copy = iov_iter_count(to);
			to_copy = min(to_copy, skb->len - skb_off);

			if (skb_copy_datagram_iter(skb, skb_off, to, to_copy))
				return -EFAULT;

			rds_stats_add(s_copy_to_user, to_copy);
			ret += to_copy;

			if (!iov_iter_count(to))
				goto out;
		}
	}
out:
	return ret;
}

/*
 * We have a series of skbs that have fragmented pieces of the congestion
 * bitmap.  They must add up to the exact size of the congestion bitmap.  We
 * use the skb helpers to copy those into the pages that make up the in-memory
 * congestion bitmap for the remote address of this connection.  We then tell
 * the congestion core that the bitmap has been changed so that it can wake up
 * sleepers.
 *
 * This is racing with sending paths which are using test_bit to see if the
 * bitmap indicates that their recipient is congested.
 */
static void rds_tcp_cong_recv(struct rds_connection *conn,
			      struct rds_tcp_incoming *tinc)
{
	struct sk_buff *skb;
	unsigned int to_copy, skb_off;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_cong_map *map;
	int ret;

	/* catch completely corrupt packets */
	if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map_page = 0;
	map_off = 0;
	map = conn->c_fcong;

	skb_queue_walk(&tinc->ti_skb_list, skb) {
		skb_off = 0;
		while (skb_off < skb->len) {
			to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
					skb->len - skb_off);

			BUG_ON(map_page >= RDS_CONG_MAP_PAGES);

			/* only returns 0 or -error */
			ret = skb_copy_bits(skb, skb_off,
				(void *)map->m_page_addrs[map_page] + map_off,
				to_copy);
			BUG_ON(ret != 0);

			skb_off += to_copy;
			map_off += to_copy;
			if (map_off == PAGE_SIZE) {
				map_off = 0;
				map_page++;
			}
		}
	}

	rds_cong_map_updated(map, ~(u64) 0);
}

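/* Context handed to rds_tcp_data_recv() via the read descriptor's arg. */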
struct rds_tcp_desc_arg {
	struct rds_conn_path *conn_path;
	gfp_t gfp;
};

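/*
 * tcp_read_sock() callback: consume a chunk of the TCP byte stream.  Bytes
 * first fill in the RDS header of the current incoming message, payload
 * skbs are then cloned onto its list, and once header and data are both
 * complete the message is handed to the congestion or regular recv path.
 */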
static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct rds_tcp_desc_arg *arg = desc->arg.data;
	struct rds_conn_path *cp = arg->conn_path;
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct rds_tcp_incoming *tinc = tc->t_tinc;
	struct sk_buff *clone;
	size_t left = len, to_copy;

	rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
		 len);

	/*
	 * tcp_read_sock() interprets partial progress as an indication to stop
	 * processing.
	 */
	while (left) {
		if (!tinc) {
			tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
						arg->gfp);
			if (!tinc) {
				desc->error = -ENOMEM;
				goto out;
			}
			tc->t_tinc = tinc;
			rdsdebug("allocated tinc %p\n", tinc);
			rds_inc_path_init(&tinc->ti_inc, cp,
					  &cp->cp_conn->c_faddr);
			tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
					local_clock();

			/*
			 * XXX: we might be able to use the __ variants when
			 * we've already serialized at a higher level.
			 */
			skb_queue_head_init(&tinc->ti_skb_list);
		}

		if (left && tc->t_tinc_hdr_rem) {
			to_copy = min(tc->t_tinc_hdr_rem, left);
			rdsdebug("copying %zu header from skb %p\n", to_copy,
				 skb);
			skb_copy_bits(skb, offset,
				      (char *)&tinc->ti_inc.i_hdr +
						sizeof(struct rds_header) -
						tc->t_tinc_hdr_rem,
				      to_copy);
			tc->t_tinc_hdr_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;

			if (tc->t_tinc_hdr_rem == 0) {
				/* could be 0 for a 0 len message */
				tc->t_tinc_data_rem =
					be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
				tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
					local_clock();
			}
		}

		if (left && tc->t_tinc_data_rem) {
			to_copy = min(tc->t_tinc_data_rem, left);

			clone = pskb_extract(skb, offset, to_copy, arg->gfp);
			if (!clone) {
				desc->error = -ENOMEM;
				goto out;
			}

			skb_queue_tail(&tinc->ti_skb_list, clone);

			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
				 "clone %p data %p len %d\n",
				 skb, skb->data, skb->len, offset, to_copy,
				 clone, clone->data, clone->len);

			tc->t_tinc_data_rem -= to_copy;
			left -= to_copy;
			offset += to_copy;
		}

		if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
			struct rds_connection *conn = cp->cp_conn;

			if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
				rds_tcp_cong_recv(conn, tinc);
			else
				rds_recv_incoming(conn, &conn->c_faddr,
						  &conn->c_laddr,
						  &tinc->ti_inc,
						  arg->gfp);

			tc->t_tinc_hdr_rem = sizeof(struct rds_header);
			tc->t_tinc_data_rem = 0;
			tc->t_tinc = NULL;
			rds_inc_put(&tinc->ti_inc);
			tinc = NULL;
		}
	}
out:
	rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
		 len, left, skb->len,
		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
	return len - left;
}

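/* Drain everything currently readable on the socket through rds_tcp_data_recv(). */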
/* the caller has to hold the sock lock */
static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *sock = tc->t_sock;
	read_descriptor_t desc;
	struct rds_tcp_desc_arg arg;

	/* Stash our context in the read descriptor for the recv callback. */
	arg.conn_path = cp;
	arg.gfp = gfp;
	desc.arg.data = &arg;
	desc.error = 0;
	desc.count = 1; /* give more than one skb per call */

	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
		 desc.error);

	return desc.error;
}

/*
 * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
 * data_ready.
 *
 * If we fail to allocate we're in trouble: blindly wait some time before
 * trying again to see if the VM can free up something for us.
 */
int rds_tcp_recv_path(struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *sock = tc->t_sock;
	int ret = 0;

	rdsdebug("recv worker path [%d] tc %p sock %p\n",
		 cp->cp_index, tc, sock);

	lock_sock(sock->sk);
	ret = rds_tcp_read_sock(cp, GFP_KERNEL);
	release_sock(sock->sk);

	return ret;
}

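/*
 * sk_data_ready callback installed on the RDS TCP socket: drain the socket
 * with GFP_ATOMIC, defer to the recv worker if allocation fails, and then
 * chain to the socket's original data_ready handler.
 */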
void rds_tcp_data_ready(struct sock *sk)
{
	void (*ready)(struct sock *sk);
	struct rds_conn_path *cp;
	struct rds_tcp_connection *tc;

	rdsdebug("data ready sk %p\n", sk);

	read_lock_bh(&sk->sk_callback_lock);
	cp = sk->sk_user_data;
	if (!cp) { /* check for teardown race */
		ready = sk->sk_data_ready;
		goto out;
	}

	tc = cp->cp_transport_data;
	ready = tc->t_orig_data_ready;
	rds_tcp_stats_inc(s_tcp_data_ready_calls);

	if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
		rcu_read_lock();
		if (!rds_destroy_pending(cp->cp_conn))
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
		rcu_read_unlock();
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	ready(sk);
}

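/* Set up the slab cache used for incoming messages. */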
int rds_tcp_recv_init(void)
{
	rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
					sizeof(struct rds_tcp_incoming),
					0, 0, NULL);
	if (!rds_tcp_incoming_slab)
		return -ENOMEM;
	return 0;
}

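/* Tear down the slab cache created by rds_tcp_recv_init(). */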
void rds_tcp_recv_exit(void)
{
	kmem_cache_destroy(rds_tcp_incoming_slab);
}