cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cn_queue.c (3291B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	cn_queue.c
 *
 * 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 * All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>

static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
			      const struct cb_id *id,
			      void (*callback)(struct cn_msg *,
					       struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq;

	cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
	if (!cbq) {
		pr_err("Failed to create new callback queue.\n");
		return NULL;
	}

	refcount_set(&cbq->refcnt, 1);

	atomic_inc(&dev->refcnt);
	cbq->pdev = dev;

	snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
	memcpy(&cbq->id.id, id, sizeof(struct cb_id));
	cbq->callback = callback;
	return cbq;
}

void cn_queue_release_callback(struct cn_callback_entry *cbq)
{
	if (!refcount_dec_and_test(&cbq->refcnt))
		return;

	atomic_dec(&cbq->pdev->refcnt);
	kfree(cbq);
}

int cn_cb_equal(const struct cb_id *i1, const struct cb_id *i2)
{
	return ((i1->idx == i2->idx) && (i1->val == i2->val));
}

int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
			  const struct cb_id *id,
			  void (*callback)(struct cn_msg *,
					   struct netlink_skb_parms *))
{
	struct cn_callback_entry *cbq, *__cbq;
	int found = 0;

	cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
	if (!cbq)
		return -ENOMEM;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&__cbq->id.id, id)) {
			found = 1;
			break;
		}
	}
	if (!found)
		list_add_tail(&cbq->callback_entry, &dev->queue_list);
	spin_unlock_bh(&dev->queue_lock);

	if (found) {
		cn_queue_release_callback(cbq);
		return -EINVAL;
	}

	cbq->seq = 0;
	cbq->group = cbq->id.id.idx;

	return 0;
}

void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id)
{
	struct cn_callback_entry *cbq, *n;
	int found = 0;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) {
		if (cn_cb_equal(&cbq->id.id, id)) {
			list_del(&cbq->callback_entry);
			found = 1;
			break;
		}
	}
	spin_unlock_bh(&dev->queue_lock);

	if (found)
		cn_queue_release_callback(cbq);
}

struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
{
	struct cn_queue_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	snprintf(dev->name, sizeof(dev->name), "%s", name);
	atomic_set(&dev->refcnt, 0);
	INIT_LIST_HEAD(&dev->queue_list);
	spin_lock_init(&dev->queue_lock);

	dev->nls = nls;

	return dev;
}

void cn_queue_free_dev(struct cn_queue_dev *dev)
{
	struct cn_callback_entry *cbq, *n;

	spin_lock_bh(&dev->queue_lock);
	list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
		list_del(&cbq->callback_entry);
	spin_unlock_bh(&dev->queue_lock);

	while (atomic_read(&dev->refcnt)) {
		pr_info("Waiting for %s to become free: refcnt=%d.\n",
			dev->name, atomic_read(&dev->refcnt));
		msleep(1000);
	}

	kfree(dev);
	dev = NULL;
}
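
For orientation, here is a minimal, hypothetical sketch (not part of cn_queue.c or this repository) of how a module would normally reach the queue functions above: the public connector API in <linux/connector.h> provides cn_add_callback() and cn_del_callback(), which forward to cn_queue_add_callback() and cn_queue_del_callback() on the connector core's cn_queue_dev. The cb_id values, names, and handler body below are illustrative placeholders.

/*
 * Hypothetical usage sketch -- not part of cn_queue.c. A module goes
 * through the public connector API, which calls into the queue code above.
 */
#include <linux/module.h>
#include <linux/connector.h>

/* Illustrative id; private users conventionally pick idx >= CN_NETLINK_USERS. */
static const struct cb_id example_id = { .idx = CN_NETLINK_USERS + 3, .val = 0x1 };

static void example_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
	pr_info("connector: seq=%u ack=%u len=%u\n", msg->seq, msg->ack, msg->len);
}

static int __init example_init(void)
{
	/* Ends up in cn_queue_add_callback(); fails with -EINVAL on duplicate id. */
	return cn_add_callback(&example_id, "example", example_callback);
}

static void __exit example_exit(void)
{
	/* Ends up in cn_queue_del_callback(), dropping the entry's refcount. */
	cn_del_callback(&example_id);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Note that cn_queue_add_callback() records the callback's netlink group from its id (cbq->group = cbq->id.id.idx), which the mainline connector core then uses to pick a multicast group when sending messages for that id.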