cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

af_can.c (25185B)


// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/can-ml.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, 0444);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

static atomic_t skbcounter = ATOMIC_INIT(0);

/* af_can socket functions */

void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);
}
EXPORT_SYMBOL(can_sock_destruct);

static const struct can_proto *can_get_proto(int protocol)
{
	const struct can_proto *cp;

	rcu_read_lock();
	cp = rcu_dereference(proto_tab[protocol]);
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;
	rcu_read_unlock();

	return cp;
}

static inline void can_put_proto(const struct can_proto *cp)
{
	module_put(cp->prot->owner);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	const struct can_proto *cp;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (protocol < 0 || protocol >= CAN_NPROTO)
		return -EINVAL;

	cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
	if (!cp) {
		/* try to load protocol module if kernel is modular */

		err = request_module("can-proto-%d", protocol);

		/* In case of error we only print a message but don't
		 * return the error code immediately.  Below we will
		 * return -EPROTONOSUPPORT
		 */
		if (err)
			pr_err_ratelimited("can: request_module (can-proto-%d) failed.\n",
					   protocol);

		cp = can_get_proto(protocol);
	}
#endif

	/* check for available protocol and correct usage */

	if (!cp)
		return -EPROTONOSUPPORT;

	if (cp->type != sock->type) {
		err = -EPROTOTYPE;
		goto errout;
	}

	sock->ops = cp->ops;

	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
	if (!sk) {
		err = -ENOMEM;
		goto errout;
	}

	sock_init_data(sock, sk);
	sk->sk_destruct = can_sock_destruct;

	if (sk->sk_prot->init)
		err = sk->sk_prot->init(sk);

	if (err) {
		/* release sk on errors */
		sock_orphan(sk);
		sock_put(sk);
	}

 errout:
	can_put_proto(cp);
	return err;
}

/* af_can tx path */

/**
 * can_send - transmit a CAN frame (optionally with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats;
	int err = -EINVAL;

	if (skb->len == CAN_MTU) {
		skb->protocol = htons(ETH_P_CAN);
		if (unlikely(cfd->len > CAN_MAX_DLEN))
			goto inval_skb;
	} else if (skb->len == CANFD_MTU) {
		skb->protocol = htons(ETH_P_CANFD);
		if (unlikely(cfd->len > CANFD_MAX_DLEN))
			goto inval_skb;
	} else {
		goto inval_skb;
	}

	/* Make sure the CAN frame can pass the selected CAN netdevice.
	 * As structs can_frame and canfd_frame are similar, we can provide
	 * CAN FD frames to legacy CAN drivers as long as the length is <= 8
	 */
	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
		err = -EMSGSIZE;
		goto inval_skb;
	}

	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
		err = -EPERM;
		goto inval_skb;
	}

	if (unlikely(!(skb->dev->flags & IFF_UP))) {
		err = -ENETDOWN;
		goto inval_skb;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/* The reference to the originating sock may be required
		 * by the receiving socket to check whether the frame is
		 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
		 * Therefore we have to ensure that skb->sk remains the
		 * reference to the originating sock by restoring skb->sk
		 * after each skb_clone() or skb_orphan() usage.
		 */

		if (!(skb->dev->flags & IFF_ECHO)) {
			/* If the interface is not capable of doing the
			 * loopback itself, we do it here.
			 */
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			can_skb_set_owner(newskb, skb->sk);
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx(newskb);

	/* update statistics */
	pkg_stats->tx_frames++;
	pkg_stats->tx_frames_delta++;

	return 0;

inval_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(can_send);

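/*
 * Illustrative sketch (not part of the original file): how a kernel-side
 * caller might build and transmit a classic CAN frame through can_send()
 * with local loopback enabled.  The interface name "can0", the identifier
 * and the payload are assumptions for the example only; error handling is
 * kept to a minimum.
 */
static int __maybe_unused can_send_example(struct net *net)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct can_frame *cf;
	int err;

	dev = dev_get_by_name(net, "can0");	/* assumed interface name */
	if (!dev)
		return -ENODEV;

	skb = alloc_skb(sizeof(struct can_skb_priv) + CAN_MTU, GFP_KERNEL);
	if (!skb) {
		dev_put(dev);
		return -ENOMEM;
	}

	/* reserve headroom for struct can_skb_priv and fill it */
	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	/* skb->len must equal CAN_MTU so can_send() treats it as classic CAN */
	cf = skb_put_zero(skb, CAN_MTU);
	cf->can_id = 0x123;			/* standard 11 bit identifier */
	cf->len = 2;
	cf->data[0] = 0xde;
	cf->data[1] = 0xad;

	skb->dev = dev;

	err = can_send(skb, 1);			/* loop = 1: echo to local sockets */
	dev_put(dev);
	return err;
}
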
/* af_can rx path */

static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
							struct net_device *dev)
{
	if (dev) {
		struct can_ml_priv *can_ml = can_get_ml_priv(dev);
		return &can_ml->dev_rcv_lists;
	} else {
		return net->can.rx_alldev_list;
	}
}

/**
 * effhash - hash function for 29 bit CAN identifier reduction
 * @can_id: 29 bit CAN identifier
 *
 * Description:
 *  To reduce the linear traversal in one linked list of _single_ EFF CAN
 *  frame subscriptions the 29 bit identifier is mapped to 10 bits.
 *  (see CAN_EFF_RCV_HASH_BITS definition)
 *
 * Return:
 *  Hash value from 0x000 - 0x3FF ( enforced by CAN_EFF_RCV_HASH_BITS mask )
 */
static unsigned int effhash(canid_t can_id)
{
	unsigned int hash;

	hash = can_id;
	hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
	hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

	return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}

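/*
 * Illustrative sketch (not part of the original file): worked example of the
 * folding done by effhash() above, assuming CAN_EFF_RCV_HASH_BITS == 10 as
 * defined in af_can.h.  For the 29 bit identifier 0x1ABCDEF0 the 10 bit
 * slices are 0x2F0 (bits 0-9), 0x337 (bits 10-19) and 0x1AB (bits 20-28),
 * so the resulting bucket index is 0x2F0 ^ 0x337 ^ 0x1AB = 0x06C.
 */
static unsigned int __maybe_unused effhash_example(void)
{
	canid_t can_id = 0x1ABCDEF0;

	/* identical to effhash(can_id) and returns 0x06C */
	return (can_id ^ (can_id >> 10) ^ (can_id >> 20)) & 0x3FF;
}
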
/**
 * can_rcv_list_find - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @dev_rcv_lists: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. This function is called by service functions that need
 *  to register or unregister a can_filter in the filter lists.
 *
 *  A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 *  a relevant bit for the filter.
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
 *  frames there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 *  Pointer to optimal filterlist for the given can_id/mask pair.
 *  Consistency checked mask.
 *  Reduced can_id to have a preprocessed filter compare value.
 */
static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
					    struct can_dev_rcv_lists *dev_rcv_lists)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

	/* filter for error message frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in filter entry */
		*mask &= CAN_ERR_MASK;
		return &dev_rcv_lists->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &dev_rcv_lists->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &dev_rcv_lists->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
	    !(*can_id & CAN_RTR_FLAG)) {
		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
				return &dev_rcv_lists->rx_eff[effhash(*can_id)];
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &dev_rcv_lists->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &dev_rcv_lists->rx[RX_FIL];
}

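/*
 * Illustrative sketch (not part of the original file): where a few typical
 * can_id/mask pairs end up when passed through can_rcv_list_find() above.
 * The can_dev_rcv_lists argument is assumed to come from
 * can_dev_rcv_lists_find(); the WARN_ON()s only document the expected
 * mapping.
 */
static void __maybe_unused can_rcv_list_find_example(struct can_dev_rcv_lists *d)
{
	canid_t can_id, mask;

	/* mask 0 matches everything -> rx[RX_ALL] */
	can_id = 0;
	mask = 0;
	WARN_ON(can_rcv_list_find(&can_id, &mask, d) != &d->rx[RX_ALL]);

	/* single SFF identifier 0x123 -> direct rx_sff[0x123] bucket */
	can_id = 0x123;
	mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG;
	WARN_ON(can_rcv_list_find(&can_id, &mask, d) != &d->rx_sff[0x123]);

	/* single EFF identifier -> hashed rx_eff[] bucket */
	can_id = 0x1ABCDEF0 | CAN_EFF_FLAG;
	mask = CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG;
	WARN_ON(can_rcv_list_find(&can_id, &mask, d) !=
		&d->rx_eff[effhash(can_id)]);

	/* error message frames -> rx[RX_ERR], CAN_ERR_FLAG cleared in mask */
	can_id = 0;
	mask = CAN_ERR_FLAG | CAN_ERR_MASK;
	WARN_ON(can_rcv_list_find(&can_id, &mask, d) != &d->rx[RX_ERR]);
}
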
/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 * @sk: socket pointer (might be NULL)
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
 *
 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
 *  the callback function is running. The callback function must *not* free
 *  the given sk_buff while processing its task. When the given sk_buff is
 *  needed after the end of the callback function it must be cloned inside
 *  the callback function with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
		    canid_t mask, void (*func)(struct sk_buff *, void *),
		    void *data, char *ident, struct sock *sk)
{
	struct receiver *rcv;
	struct hlist_head *rcv_list;
	struct can_dev_rcv_lists *dev_rcv_lists;
	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
	int err = 0;

	/* insert new receiver  (dev,canid,mask) -> (func,data) */

	if (dev && dev->type != ARPHRD_CAN)
		return -ENODEV;

	if (dev && !net_eq(net, dev_net(dev)))
		return -ENODEV;

	rcv = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!rcv)
		return -ENOMEM;

	spin_lock_bh(&net->can.rcvlists_lock);

	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);

	rcv->can_id = can_id;
	rcv->mask = mask;
	rcv->matches = 0;
	rcv->func = func;
	rcv->data = data;
	rcv->ident = ident;
	rcv->sk = sk;

	hlist_add_head_rcu(&rcv->list, rcv_list);
	dev_rcv_lists->entries++;

	rcv_lists_stats->rcv_entries++;
	rcv_lists_stats->rcv_entries_max = max(rcv_lists_stats->rcv_entries_max,
					       rcv_lists_stats->rcv_entries);
	spin_unlock_bh(&net->can.rcvlists_lock);

	return err;
}
EXPORT_SYMBOL(can_rx_register);

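/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * in-kernel user might subscribe to a single standard identifier on one
 * interface and drop the subscription again.  The example_* names and the
 * ident string "example" are placeholders.  The callback runs in NET_RX
 * softirq context, must not free the sk_buff and has to clone it if the
 * frame is needed after the callback returns.
 */
static void example_rcv(struct sk_buff *skb, void *data)
{
	struct can_frame *cf = (struct can_frame *)skb->data;

	pr_debug("example: got frame id %03X len %d\n",
		 cf->can_id & CAN_SFF_MASK, cf->len);
}

static int __maybe_unused example_subscribe(struct net *net,
					    struct net_device *dev)
{
	/* deliver only SFF frames with identifier 0x123 on @dev */
	return can_rx_register(net, dev, 0x123,
			       CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
			       example_rcv, NULL, "example", NULL);
}

static void __maybe_unused example_unsubscribe(struct net *net,
					       struct net_device *dev)
{
	/* must use the same (dev, can_id, mask, func, data) tuple */
	can_rx_unregister(net, dev, 0x123,
			  CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
			  example_rcv, NULL);
}
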
/* can_rx_delete_receiver - rcu callback for single receiver entry removal */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
	struct receiver *rcv = container_of(rp, struct receiver, rcu);
	struct sock *sk = rcv->sk;

	kmem_cache_free(rcv_cache, rcv);
	if (sk)
		sock_put(sk);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
		       canid_t mask, void (*func)(struct sk_buff *, void *),
		       void *data)
{
	struct receiver *rcv = NULL;
	struct hlist_head *rcv_list;
	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
	struct can_dev_rcv_lists *dev_rcv_lists;

	if (dev && dev->type != ARPHRD_CAN)
		return;

	if (dev && !net_eq(net, dev_net(dev)))
		return;

	spin_lock_bh(&net->can.rcvlists_lock);

	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);

	/* Search the receiver list for the item to delete.  This should
	 * exist, since no receiver may be unregistered that hasn't
	 * been registered before.
	 */
	hlist_for_each_entry_rcu(rcv, rcv_list, list) {
		if (rcv->can_id == can_id && rcv->mask == mask &&
		    rcv->func == func && rcv->data == data)
			break;
	}

	/* Check for bugs in CAN protocol implementations using af_can.c:
	 * 'rcv' will be NULL if no matching list item was found for removal.
	 * As this case may potentially happen when closing a socket while
	 * the notifier for removing the CAN netdev is running we just print
	 * a warning here.
	 */
	if (!rcv) {
		pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n",
			DNAME(dev), can_id, mask);
		goto out;
	}

	hlist_del_rcu(&rcv->list);
	dev_rcv_lists->entries--;

	if (rcv_lists_stats->rcv_entries > 0)
		rcv_lists_stats->rcv_entries--;

 out:
	spin_unlock_bh(&net->can.rcvlists_lock);

	/* schedule the receiver item for deletion */
	if (rcv) {
		if (rcv->sk)
			sock_hold(rcv->sk);
		call_rcu(&rcv->rcu, can_rx_delete_receiver);
	}
}
EXPORT_SYMBOL(can_rx_unregister);

static inline void deliver(struct sk_buff *skb, struct receiver *rcv)
{
	rcv->func(skb, rcv->data);
	rcv->matches++;
}

static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb)
{
	struct receiver *rcv;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	if (dev_rcv_lists->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* check for error message frame entries only */
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ERR], list) {
			if (can_id & rcv->mask) {
				deliver(skb, rcv);
				matches++;
			}
		}
		return matches;
	}

	/* check for unfiltered entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ALL], list) {
		deliver(skb, rcv);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_FIL], list) {
		if ((can_id & rcv->mask) == rcv->can_id) {
			deliver(skb, rcv);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_INV], list) {
		if ((can_id & rcv->mask) != rcv->can_id) {
			deliver(skb, rcv);
			matches++;
		}
	}

	/* check filterlists for single non-RTR can_ids */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_eff[effhash(can_id)], list) {
			if (rcv->can_id == can_id) {
				deliver(skb, rcv);
				matches++;
			}
		}
	} else {
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_sff[can_id], list) {
			deliver(skb, rcv);
			matches++;
		}
	}

	return matches;
}

static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
	struct can_dev_rcv_lists *dev_rcv_lists;
	struct net *net = dev_net(dev);
	struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
	int matches;

	/* update statistics */
	pkg_stats->rx_frames++;
	pkg_stats->rx_frames_delta++;

	/* create non-zero unique skb identifier together with *skb */
	while (!(can_skb_prv(skb)->skbcnt))
		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(net->can.rx_alldev_list, skb);

	/* find receive list for this device */
	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	matches += can_rcv_filter(dev_rcv_lists, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		pkg_stats->matches++;
		pkg_stats->matches_delta++;
	}
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
		pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
			     dev->type, skb->len);
		goto free_skb;
	}

	/* This check is made separately since cfd->len would be uninitialized if skb->len = 0. */
	if (unlikely(cfd->len > CAN_MAX_DLEN)) {
		pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d, datalen %d\n",
			     dev->type, skb->len, cfd->len);
		goto free_skb;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;

free_skb:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
			     dev->type, skb->len);
		goto free_skb;
	}

	/* This check is made separately since cfd->len would be uninitialized if skb->len = 0. */
	if (unlikely(cfd->len > CANFD_MAX_DLEN)) {
		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d, datalen %d\n",
			     dev->type, skb->len, cfd->len);
		goto free_skb;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;

free_skb:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* af_can protocol functions */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  -ENOBUFS if proto_register() fails
 */
int can_proto_register(const struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		pr_err("can: protocol number %d out of range\n", proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	mutex_lock(&proto_tab_lock);

	if (rcu_access_pointer(proto_tab[proto])) {
		pr_err("can: protocol %d already registered\n", proto);
		err = -EBUSY;
	} else {
		RCU_INIT_POINTER(proto_tab[proto], cp);
	}

	mutex_unlock(&proto_tab_lock);

	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
	int proto = cp->protocol;

	mutex_lock(&proto_tab_lock);
	BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
	RCU_INIT_POINTER(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);

	synchronize_rcu();

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);

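/*
 * Illustrative sketch (not part of the original file, kept in #if 0): the
 * minimal shape of a CAN transport protocol as registered by modules like
 * can-raw or can-bcm.  CAN_EXAMPLE, the example_* names and the mostly
 * empty proto_ops are placeholders; a real module also fills in the socket
 * operations and announces MODULE_ALIAS("can-proto-<n>") so that
 * can_create() can autoload it via request_module().
 */
#if 0
static const struct proto_ops example_ops = {
	.family	= PF_CAN,
	.owner	= THIS_MODULE,
	/* .release, .bind, .sendmsg, .recvmsg, ... */
};

static struct proto example_proto __read_mostly = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static const struct can_proto example_can_proto = {
	.type		= SOCK_DGRAM,
	.protocol	= CAN_EXAMPLE,	/* placeholder protocol number */
	.ops		= &example_ops,
	.prot		= &example_proto,
};

static int __init example_init(void)
{
	return can_proto_register(&example_can_proto);
}

static void __exit example_exit(void)
{
	can_proto_unregister(&example_can_proto);
}
#endif
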
static int can_pernet_init(struct net *net)
{
	spin_lock_init(&net->can.rcvlists_lock);
	net->can.rx_alldev_list =
		kzalloc(sizeof(*net->can.rx_alldev_list), GFP_KERNEL);
	if (!net->can.rx_alldev_list)
		goto out;
	net->can.pkg_stats = kzalloc(sizeof(*net->can.pkg_stats), GFP_KERNEL);
	if (!net->can.pkg_stats)
		goto out_free_rx_alldev_list;
	net->can.rcv_lists_stats = kzalloc(sizeof(*net->can.rcv_lists_stats), GFP_KERNEL);
	if (!net->can.rcv_lists_stats)
		goto out_free_pkg_stats;

	if (IS_ENABLED(CONFIG_PROC_FS)) {
		/* the statistics are updated every second (timer triggered) */
		if (stats_timer) {
			timer_setup(&net->can.stattimer, can_stat_update,
				    0);
			mod_timer(&net->can.stattimer,
				  round_jiffies(jiffies + HZ));
		}
		net->can.pkg_stats->jiffies_init = jiffies;
		can_init_proc(net);
	}

	return 0;

 out_free_pkg_stats:
	kfree(net->can.pkg_stats);
 out_free_rx_alldev_list:
	kfree(net->can.rx_alldev_list);
 out:
	return -ENOMEM;
}

static void can_pernet_exit(struct net *net)
{
	if (IS_ENABLED(CONFIG_PROC_FS)) {
		can_remove_proc(net);
		if (stats_timer)
			del_timer_sync(&net->can.stattimer);
	}

	kfree(net->can.rx_alldev_list);
	kfree(net->can.pkg_stats);
	kfree(net->can.rcv_lists_stats);
}

/* af_can module init/exit functions */

static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.func = can_rcv,
};

static struct packet_type canfd_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CANFD),
	.func = canfd_rcv,
};

static const struct net_proto_family can_family_ops = {
	.family = PF_CAN,
	.create = can_create,
	.owner  = THIS_MODULE,
};

static struct pernet_operations can_pernet_ops __read_mostly = {
	.init = can_pernet_init,
	.exit = can_pernet_exit,
};

static __init int can_init(void)
{
	int err;

	/* check for correct padding to be able to use the structs similarly */
	BUILD_BUG_ON(offsetof(struct can_frame, len) !=
		     offsetof(struct canfd_frame, len) ||
		     offsetof(struct can_frame, data) !=
		     offsetof(struct canfd_frame, data));

	pr_info("can: controller area network core\n");

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	err = register_pernet_subsys(&can_pernet_ops);
	if (err)
		goto out_pernet;

	/* protocol register */
	err = sock_register(&can_family_ops);
	if (err)
		goto out_sock;

	dev_add_pack(&can_packet);
	dev_add_pack(&canfd_packet);

	return 0;

out_sock:
	unregister_pernet_subsys(&can_pernet_ops);
out_pernet:
	kmem_cache_destroy(rcv_cache);

	return err;
}

static __exit void can_exit(void)
{
	/* protocol unregister */
	dev_remove_pack(&canfd_packet);
	dev_remove_pack(&can_packet);
	sock_unregister(PF_CAN);

	unregister_pernet_subsys(&can_pernet_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);