cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

conn_service.c (6183B)


// SPDX-License-Identifier: GPL-2.0-or-later
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "ar-internal.h"

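/* Service connections don't use a client-style connection bundle, but other
 * code dereferences conn->bundle (taking its channel_lock, for instance), so
 * preallocated service connections point at this shared dummy bundle rather
 * than at NULL.
 */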
static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
	.ref		= REFCOUNT_INIT(1),
	.debug_id	= UINT_MAX,
	.channel_lock	= __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
};

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

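	/* In rxrpc_conn_proto (ar-internal.h), epoch and cid overlay
	 * index_key in a union, roughly:
	 *
	 *	struct rxrpc_conn_proto {
	 *		union {
	 *			struct { u32 epoch, cid; };
	 *			u64 index_key;
	 *		};
	 *	};
	 *
	 * so the two stores below also build the 64-bit rbtree search key.
	 */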
	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
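		/* read_seqbegin_or_lock() runs the first pass locklessly
		 * (even seq); if need_seqretry() sees that a writer got in,
		 * the second pass takes the seqlock instead.  done_seqretry()
		 * releases the lock if it was taken.
		 */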
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	write_seqlock_bh(&peer->service_conn_lock);

	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

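	/* rb_link_node_rcu() publishes the new node with rcu_assign_pointer(),
	 * so lockless readers in rxrpc_find_service_conn_rcu() see either the
	 * old tree or the fully initialised new node.
	 */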
	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	if (refcount_read(&cursor->ref) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}

/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
							   gfp_t gfp)
{
	struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);

	if (conn) {
		/* We maintain an extra ref on the connection whilst it is on
		 * the rxrpc_connections list.
		 */
		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
		refcount_set(&conn->ref, 2);
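		/* (i.e. the caller's ref plus the extra ref for the
		 * rxrpc_connections list mentioned above.)
		 */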
		conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);

		atomic_inc(&rxnet->nr_conns);
		write_lock(&rxnet->conn_lock);
		list_add_tail(&conn->link, &rxnet->service_conns);
		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
		write_unlock(&rxnet->conn_lock);

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 refcount_read(&conn->ref),
				 __builtin_return_address(0));
	}

	return conn;
}

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
				   struct rxrpc_connection *conn,
				   const struct rxrpc_security *sec,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("");

	conn->proto.epoch	= sp->hdr.epoch;
	conn->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
	conn->params.service_id	= sp->hdr.serviceId;
	conn->service_id	= sp->hdr.serviceId;
	conn->security_ix	= sp->hdr.securityIndex;
	conn->out_clientflag	= 0;
	conn->security		= sec;
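	/* A non-zero security index means the connection must negotiate
	 * security (e.g. the rxkad challenge/response) before it's fully up.
	 */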
	if (conn->security_ix)
		conn->state	= RXRPC_CONN_SERVICE_UNSECURED;
	else
		conn->state	= RXRPC_CONN_SERVICE;

	/* See if we should upgrade the service.  This can only happen on the
	 * first packet on a new connection.  Once done, it applies to all
	 * subsequent calls on that connection.
	 */
	if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
	    conn->service_id == rx->service_upgrade.from)
		conn->service_id = rx->service_upgrade.to;
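	/* (The in-kernel AFS client uses this mechanism, for example, to try
	 * upgrading a service to its YFS-aware variant.)
	 */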

	/* Make the connection a target for incoming packets. */
	rxrpc_publish_service_conn(conn->params.peer, conn);

	_net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}

/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->params.peer;

	write_seqlock_bh(&peer->service_conn_lock);
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock_bh(&peer->service_conn_lock);
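
	/* RCU readers may still hold a pointer to this connection; the
	 * connection object itself is only freed after an RCU grace period.
	 */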
}