cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vxcan.c (7407B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);

struct vxcan_priv {
	struct net_device __rcu	*peer;
};

static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	struct sk_buff *skb;
	u8 len;

	if (can_dropped_invalid_skb(dev, oskb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(oskb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (skb) {
		consume_skb(oskb);
	} else {
		kfree_skb(oskb);
		goto out_unlock;
	}

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}


static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu = vxcan_change_mtu,
};

static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANFD_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->needs_free_netdev	= true;

	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;

static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer;

		nla_peer = data[VXCAN_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) +
					  sizeof(struct ifinfomsg),
					  nla_len(nla_peer) -
					  sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	if (IS_ERR(peer_net))
		return PTR_ERR(peer_net);

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}

static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);
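
For illustration, below is a minimal userspace sketch of how a vxcan pair carries traffic end to end. It is not part of the driver: it assumes a pair vxcan0/vxcan1 has already been created and brought up (typically via "ip link add vxcan0 type vxcan peer name vxcan1" followed by setting both links up), and that the kernel headers in use expose the len member of struct can_frame. The interface names and the open_can_socket() helper are illustrative choices, not anything the driver mandates.

/* Hypothetical usage sketch: send one CAN frame into vxcan0 and read it back
 * on its peer vxcan1 via two CAN_RAW sockets. Interface names are assumed. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int open_can_socket(const char *ifname)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr;
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return -1;

	/* resolve the interface index of the requested CAN device */
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0) {
		close(s);
		return -1;
	}

	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(s);
		return -1;
	}
	return s;
}

int main(void)
{
	struct can_frame tx = { .can_id = 0x123, .len = 2, .data = { 0xde, 0xad } };
	struct can_frame rx;
	int tx_sock = open_can_socket("vxcan0");
	int rx_sock = open_can_socket("vxcan1");

	if (tx_sock < 0 || rx_sock < 0) {
		perror("open_can_socket");
		return 1;
	}

	/* The frame written here enters vxcan_xmit() on vxcan0; the driver
	 * clones the skb and delivers it to the peer via netif_rx(). */
	if (write(tx_sock, &tx, sizeof(tx)) != sizeof(tx)) {
		perror("write");
		return 1;
	}
	if (read(rx_sock, &rx, sizeof(rx)) < 0) {
		perror("read");
		return 1;
	}
	printf("received id=0x%03x len=%u\n", rx.can_id, rx.len);
	return 0;
}

Because the two endpoints are cross-linked through priv->peer, the frame written on vxcan0 surfaces unchanged on vxcan1, and the per-device tx/rx counters updated in vxcan_xmit() reflect the transfer.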