cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

x25_forward.c (3448B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	History
 *	03-01-2007	Added forwarding for x.25	Andrew Hendry
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>
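
/* Table of active forwarded calls and the lock protecting it. Entries are
 * added by x25_forward_call() and removed again by the clear helpers at the
 * bottom of this file.
 */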
LIST_HEAD(x25_forward_list);
DEFINE_RWLOCK(x25_forward_list_lock);
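
/* Forward an incoming call request towards the device that routes to
 * dest_addr and, if this lci is not yet known, record the device pair so
 * that later data packets on the call can be relayed by x25_forward_data().
 * Returns 1 if the call request was forwarded, -ENOMEM if the forwarding
 * entry could not be allocated, and 0 otherwise.
 */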
int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
			struct sk_buff *skb, int lci)
{
	struct x25_route *rt;
	struct x25_neigh *neigh_new = NULL;
	struct x25_forward *x25_frwd, *new_frwd;
	struct sk_buff *skbn;
	short same_lci = 0;
	int rc = 0;

	if ((rt = x25_get_route(dest_addr)) == NULL)
		goto out_no_route;

	if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
		/* This should not happen; if it somehow occurs,
		 * do something sensible.
		 */
		goto out_put_route;
	}

	/* Avoid a loop. This is the normal exit path for a
	 * system with only one x.25 iface and default route.
	 */
	if (rt->dev == from->dev) {
		goto out_put_nb;
	}

	/* Remote end sending a call request on an already
	 * established LCI? It shouldn't happen, but just in case...
	 */
	read_lock_bh(&x25_forward_list_lock);
	list_for_each_entry(x25_frwd, &x25_forward_list, node) {
		if (x25_frwd->lci == lci) {
			pr_warn("call request for an lci which is already registered; transmitting but not registering a new pair\n");
			same_lci = 1;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	/* Save the forwarding details for future traffic */
	if (!same_lci) {
		if ((new_frwd = kmalloc(sizeof(struct x25_forward),
						GFP_ATOMIC)) == NULL) {
			rc = -ENOMEM;
			goto out_put_nb;
		}
		new_frwd->lci = lci;
		new_frwd->dev1 = rt->dev;
		new_frwd->dev2 = from->dev;
		write_lock_bh(&x25_forward_list_lock);
		list_add(&new_frwd->node, &x25_forward_list);
		write_unlock_bh(&x25_forward_list_lock);
	}

	/* Forward the call request */
	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
		goto out_put_nb;
	}
	x25_transmit_link(skbn, neigh_new);
	rc = 1;

out_put_nb:
	x25_neigh_put(neigh_new);

out_put_route:
	x25_route_put(rt);

out_no_route:
	return rc;
}
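
/* Relay a data packet on a forwarded call. The lci is looked up in
 * x25_forward_list to find the peer device recorded by x25_forward_call(),
 * and a copy of the skb is transmitted to the neighbour on that device.
 * Returns 1 if the packet was forwarded, 0 otherwise.
 */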
int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
{
	struct x25_forward *frwd;
	struct net_device *peer = NULL;
	struct x25_neigh *nb;
	struct sk_buff *skbn;
	int rc = 0;

	read_lock_bh(&x25_forward_list_lock);
	list_for_each_entry(frwd, &x25_forward_list, node) {
		if (frwd->lci == lci) {
			/* The call is established, either side can send */
			if (from->dev == frwd->dev1) {
				peer = frwd->dev2;
			} else {
				peer = frwd->dev1;
			}
			break;
		}
	}
	read_unlock_bh(&x25_forward_list_lock);

	if ((nb = x25_get_neigh(peer)) == NULL)
		goto out;

	if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL)
		goto output;

	x25_transmit_link(skbn, nb);

	rc = 1;
output:
	x25_neigh_put(nb);
out:
	return rc;
}
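
/* Remove the forwarding entry for a given lci, e.g. when the forwarded
 * call is cleared.
 */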
void x25_clear_forward_by_lci(unsigned int lci)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if (fwd->lci == lci) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}
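
/* Remove all forwarding entries that reference a device, e.g. when the
 * device is going down or being unregistered.
 */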
void x25_clear_forward_by_dev(struct net_device *dev)
{
	struct x25_forward *fwd, *tmp;

	write_lock_bh(&x25_forward_list_lock);

	list_for_each_entry_safe(fwd, tmp, &x25_forward_list, node) {
		if ((fwd->dev1 == dev) || (fwd->dev2 == dev)) {
			list_del(&fwd->node);
			kfree(fwd);
		}
	}
	write_unlock_bh(&x25_forward_list_lock);
}