cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

connector.c (6814B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 *	connector.c
      4 *
      5 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
      6 * All rights reserved.
      7 */
      8
      9#include <linux/compiler.h>
     10#include <linux/kernel.h>
     11#include <linux/module.h>
     12#include <linux/list.h>
     13#include <linux/skbuff.h>
     14#include <net/netlink.h>
     15#include <linux/moduleparam.h>
     16#include <linux/connector.h>
     17#include <linux/slab.h>
     18#include <linux/mutex.h>
     19#include <linux/proc_fs.h>
     20#include <linux/spinlock.h>
     21
     22#include <net/sock.h>
     23
     24MODULE_LICENSE("GPL");
     25MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
     26MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");
     27MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR);
     28
/* The single, file-global connector device: holds the kernel netlink
 * socket (nls) and the registered-callback queue (cbdev).
 */
static struct cn_dev cdev;

/* Set to 1 by cn_init() once cdev is fully set up; cn_add_callback()
 * refuses registrations (-EAGAIN) until then, and cn_fini() clears it
 * again before tearing cdev down.
 */
static int cn_already_initialized;
/*
 * Sends mult (multiple) cn_msg at a time.
 *
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message it puts there a locally unique sequence
 * and a random acknowledge number.  The sequence number may be copied
 * into nlmsghdr->nlmsg_seq too.
 *
 * The sequence number is incremented with each message to be sent.
 *
 * If we expect a reply to our message then the sequence number in the
 * received message MUST be the same as in the original message, and
 * the acknowledge number MUST be the same + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting then it is a new message.
 *
 * If we receive a message and its sequence number is the same as one
 * we are expecting but its acknowledgement number is not equal to
 * the acknowledgement number in the original message + 1, then it is
 * a new message.
 *
 * If msg->len != len, then additional cn_msg messages are expected following
 * the first msg.
 *
 * The message is sent to, the portid if given, the group if given, both if
 * both, or if both are zero then the group is looked up and sent there.
 *
 * Returns 0 or a negative errno: -ENODEV if no callback is registered
 * for msg->id, -ESRCH if nobody listens on the group, -ENOMEM /
 * -EMSGSIZE on skb construction failure, or the netlink send result.
 */
int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	struct cn_callback_entry *__cbq;
	unsigned int size;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct cn_msg *data;
	struct cn_dev *dev = &cdev;
	u32 group = 0;
	int found = 0;

	if (portid || __group) {
		group = __group;
	} else {
		/* Neither portid nor group given: look up the multicast
		 * group registered for this message id, under the queue
		 * lock so entries cannot change while we scan.
		 */
		spin_lock_bh(&dev->cbdev->queue_lock);
		list_for_each_entry(__cbq, &dev->cbdev->queue_list,
				    callback_entry) {
			if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
				found = 1;
				group = __cbq->group;
				break;
			}
		}
		spin_unlock_bh(&dev->cbdev->queue_lock);

		if (!found)
			return -ENODEV;
	}

	/* Skip building the skb entirely when nobody is listening. */
	if (!portid && !netlink_has_listeners(dev->nls, group))
		return -ESRCH;

	size = sizeof(*msg) + len;

	skb = nlmsg_new(size, gfp_mask);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Copy the cn_msg header plus len bytes of payload in one go. */
	data = nlmsg_data(nlh);

	memcpy(data, msg, size);

	NETLINK_CB(skb).dst_group = group;

	if (group)
		return netlink_broadcast(dev->nls, skb, portid, group,
					 gfp_mask);
	/* Unicast may only block when the allocation mask allows it. */
	return netlink_unicast(dev->nls, skb, portid,
			!gfpflags_allow_blocking(gfp_mask));
}
EXPORT_SYMBOL_GPL(cn_netlink_send_mult);
    119
/* Same as cn_netlink_send_mult() except the length is taken from
 * msg->len, i.e. a single cn_msg is sent.
 */
int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group,
	gfp_t gfp_mask)
{
	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
    127
/*
 * Callback helper - invokes the callback registered for the message id.
 *
 * On success (0) the skb reference passed in is consumed here via
 * kfree_skb(); on failure (-ENODEV: no matching callback, -EINVAL:
 * msg->len exceeds the skb) the caller keeps ownership and must drop
 * its reference itself (see cn_rx_skb()).
 */
static int cn_call_callback(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	struct cn_callback_entry *i, *cbq = NULL;
	struct cn_dev *dev = &cdev;
	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
	int err = -ENODEV;

	/* verify msg->len is within skb */
	nlh = nlmsg_hdr(skb);
	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
		return -EINVAL;

	/* Find the entry under the queue lock and take a reference so
	 * it cannot be released while we run its callback below.
	 */
	spin_lock_bh(&dev->cbdev->queue_lock);
	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
		if (cn_cb_equal(&i->id.id, &msg->id)) {
			refcount_inc(&i->refcnt);
			cbq = i;
			break;
		}
	}
	spin_unlock_bh(&dev->cbdev->queue_lock);

	if (cbq != NULL) {
		/* Callback runs synchronously, outside the queue lock. */
		cbq->callback(msg, nsp);
		kfree_skb(skb);
		cn_queue_release_callback(cbq);
		err = 0;
	}

	return err;
}
    164
    165/*
    166 * Main netlink receiving function.
    167 *
    168 * It checks skb, netlink header and msg sizes, and calls callback helper.
    169 */
    170static void cn_rx_skb(struct sk_buff *skb)
    171{
    172	struct nlmsghdr *nlh;
    173	int len, err;
    174
    175	if (skb->len >= NLMSG_HDRLEN) {
    176		nlh = nlmsg_hdr(skb);
    177		len = nlmsg_len(nlh);
    178
    179		if (len < (int)sizeof(struct cn_msg) ||
    180		    skb->len < nlh->nlmsg_len ||
    181		    len > CONNECTOR_MAX_MSG_SIZE)
    182			return;
    183
    184		err = cn_call_callback(skb_get(skb));
    185		if (err < 0)
    186			kfree_skb(skb);
    187	}
    188}
    189
    190/*
    191 * Callback add routing - adds callback with given ID and name.
    192 * If there is registered callback with the same ID it will not be added.
    193 *
    194 * May sleep.
    195 */
    196int cn_add_callback(const struct cb_id *id, const char *name,
    197		    void (*callback)(struct cn_msg *,
    198				     struct netlink_skb_parms *))
    199{
    200	struct cn_dev *dev = &cdev;
    201
    202	if (!cn_already_initialized)
    203		return -EAGAIN;
    204
    205	return cn_queue_add_callback(dev->cbdev, name, id, callback);
    206}
    207EXPORT_SYMBOL_GPL(cn_add_callback);
    208
    209/*
    210 * Callback remove routing - removes callback
    211 * with given ID.
    212 * If there is no registered callback with given
    213 * ID nothing happens.
    214 *
    215 * May sleep while waiting for reference counter to become zero.
    216 */
    217void cn_del_callback(const struct cb_id *id)
    218{
    219	struct cn_dev *dev = &cdev;
    220
    221	cn_queue_del_callback(dev->cbdev, id);
    222}
    223EXPORT_SYMBOL_GPL(cn_del_callback);
    224
    225static int __maybe_unused cn_proc_show(struct seq_file *m, void *v)
    226{
    227	struct cn_queue_dev *dev = cdev.cbdev;
    228	struct cn_callback_entry *cbq;
    229
    230	seq_printf(m, "Name            ID\n");
    231
    232	spin_lock_bh(&dev->queue_lock);
    233
    234	list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
    235		seq_printf(m, "%-15s %u:%u\n",
    236			   cbq->id.name,
    237			   cbq->id.id.idx,
    238			   cbq->id.id.val);
    239	}
    240
    241	spin_unlock_bh(&dev->queue_lock);
    242
    243	return 0;
    244}
    245
/* Bring up the connector core: create the NETLINK_CONNECTOR kernel
 * socket, allocate the callback queue device, then expose the
 * "connector" proc entry.  Only after everything is in place is
 * cn_already_initialized set, which unblocks cn_add_callback().
 */
static int cn_init(void)
{
	struct cn_dev *dev = &cdev;
	struct netlink_kernel_cfg cfg = {
		/* 0xf extra multicast groups beyond the defined users —
		 * presumably headroom for unregistered ids; see
		 * CN_NETLINK_USERS (TODO confirm).
		 */
		.groups	= CN_NETLINK_USERS + 0xf,
		.input	= cn_rx_skb,
	};

	dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR, &cfg);
	if (!dev->nls)
		return -EIO;

	dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
	if (!dev->cbdev) {
		netlink_kernel_release(dev->nls);
		return -EINVAL;
	}

	/* From here on cn_add_callback() accepts registrations. */
	cn_already_initialized = 1;

	proc_create_single("connector", S_IRUGO, init_net.proc_net, cn_proc_show);

	return 0;
}
    270
/* Tear down the connector core in reverse order of cn_init(). */
static void cn_fini(void)
{
	struct cn_dev *dev = &cdev;

	/* Reject new cn_add_callback() registrations first. */
	cn_already_initialized = 0;

	remove_proc_entry("connector", init_net.proc_net);

	cn_queue_free_dev(dev->cbdev);
	netlink_kernel_release(dev->nls);
}

/* subsys_initcall: initialized earlier than regular module initcalls,
 * so later-initializing code can register callbacks.
 */
subsys_initcall(cn_init);
module_exit(cn_fini);