cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mapping.c (5514B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies */

#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>

#include "mapping.h"

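/*
 * Grace period before an unreferenced mapping is actually freed when
 * delayed removal is enabled. Used as milliseconds for the per-item
 * timeout and passed verbatim (i.e. as jiffies) to the delayed work.
 */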
#define MAPPING_GRACE_PERIOD 2000

static LIST_HEAD(shared_ctx_list);
static DEFINE_MUTEX(shared_ctx_lock);

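/*
 * A mapping context maps fixed-size data blobs to unique u32 IDs.
 * Lookups by ID go through the xarray; duplicate detection on insert
 * goes through the hashtable, keyed by a jhash of the data. Contexts
 * can be shared between users via an (id, type) key in shared_ctx_list.
 */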
struct mapping_ctx {
	struct xarray xarray;
	DECLARE_HASHTABLE(ht, 8);
	struct mutex lock; /* Guards hashtable and xarray */
	unsigned long max_id;
	size_t data_size;
	bool delayed_removal;
	struct delayed_work dwork;
	struct list_head pending_list;
	spinlock_t pending_list_lock; /* Guards pending list */
	u64 id;
	u8 type;
	struct list_head list;
	refcount_t refcount;
};

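/*
 * A single mapping entry. cnt is the number of mapping_add() calls that
 * share this entry; data[] holds the ctx->data_size bytes being mapped.
 */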
struct mapping_item {
	struct rcu_head rcu;
	struct list_head list;
	unsigned long timeout;
	struct hlist_node node;
	int cnt;
	u32 id;
	char data[];
};

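/*
 * Map @data to a u32 ID. If an item with identical data already exists,
 * its use count (cnt) is bumped and its ID is reused; otherwise a new
 * item is allocated and an ID in [1, max_id] is assigned via xa_alloc().
 */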
int mapping_add(struct mapping_ctx *ctx, void *data, u32 *id)
{
	struct mapping_item *mi;
	int err = -ENOMEM;
	u32 hash_key;

	mutex_lock(&ctx->lock);

	hash_key = jhash(data, ctx->data_size, 0);
	hash_for_each_possible(ctx->ht, mi, node, hash_key) {
		if (!memcmp(data, mi->data, ctx->data_size))
			goto attach;
	}

	mi = kzalloc(sizeof(*mi) + ctx->data_size, GFP_KERNEL);
	if (!mi)
		goto err_alloc;

	memcpy(mi->data, data, ctx->data_size);
	hash_add(ctx->ht, &mi->node, hash_key);

	err = xa_alloc(&ctx->xarray, &mi->id, mi, XA_LIMIT(1, ctx->max_id),
		       GFP_KERNEL);
	if (err)
		goto err_assign;
attach:
	++mi->cnt;
	*id = mi->id;

	mutex_unlock(&ctx->lock);

	return 0;

err_assign:
	hash_del(&mi->node);
	kfree(mi);
err_alloc:
	mutex_unlock(&ctx->lock);

	return err;
}

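/*
 * Unpublish the item from the xarray and free it only after an RCU
 * grace period, so concurrent mapping_find() readers stay safe.
 */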
static void mapping_remove_and_free(struct mapping_ctx *ctx,
				    struct mapping_item *mi)
{
	xa_erase(&ctx->xarray, mi->id);
	kfree_rcu(mi, rcu);
}

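/*
 * Free an item whose use count dropped to zero: immediately when delayed
 * removal is off, otherwise park it on the pending list and let the
 * delayed work reap it once its grace period expires.
 */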
static void mapping_free_item(struct mapping_ctx *ctx,
			      struct mapping_item *mi)
{
	if (!ctx->delayed_removal) {
		mapping_remove_and_free(ctx, mi);
		return;
	}

	mi->timeout = jiffies + msecs_to_jiffies(MAPPING_GRACE_PERIOD);

	spin_lock(&ctx->pending_list_lock);
	list_add_tail(&mi->list, &ctx->pending_list);
	spin_unlock(&ctx->pending_list_lock);

	schedule_delayed_work(&ctx->dwork, MAPPING_GRACE_PERIOD);
}

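/*
 * Drop one reference on the mapping identified by @id; the item is only
 * unhashed and freed once the last user is gone. Returns -ENOENT if the
 * ID is unknown.
 */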
int mapping_remove(struct mapping_ctx *ctx, u32 id)
{
	unsigned long index = id;
	struct mapping_item *mi;
	int err = -ENOENT;

	mutex_lock(&ctx->lock);
	mi = xa_load(&ctx->xarray, index);
	if (!mi)
		goto out;
	err = 0;

	if (--mi->cnt > 0)
		goto out;

	hash_del(&mi->node);
	mapping_free_item(ctx, mi);
out:
	mutex_unlock(&ctx->lock);

	return err;
}

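/*
 * Copy the data mapped to @id into @data. Runs under rcu_read_lock()
 * only, so it may still find items that are pending delayed removal.
 */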
int mapping_find(struct mapping_ctx *ctx, u32 id, void *data)
{
	unsigned long index = id;
	struct mapping_item *mi;
	int err = -ENOENT;

	rcu_read_lock();
	mi = xa_load(&ctx->xarray, index);
	if (!mi)
		goto err_find;

	memcpy(data, mi->data, ctx->data_size);
	err = 0;

err_find:
	rcu_read_unlock();
	return err;
}

static void
mapping_remove_and_free_list(struct mapping_ctx *ctx, struct list_head *list)
{
	struct mapping_item *mi;

	list_for_each_entry(mi, list, list)
		mapping_remove_and_free(ctx, mi);
}

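/*
 * Delayed work: reap pending items whose grace period has expired and,
 * if items remain on the list, re-arm the work for the earliest timeout.
 */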
static void mapping_work_handler(struct work_struct *work)
{
	unsigned long min_timeout = 0, now = jiffies;
	struct mapping_item *mi, *next;
	LIST_HEAD(pending_items);
	struct mapping_ctx *ctx;

	ctx = container_of(work, struct mapping_ctx, dwork.work);

	spin_lock(&ctx->pending_list_lock);
	list_for_each_entry_safe(mi, next, &ctx->pending_list, list) {
		if (time_after(now, mi->timeout))
			list_move(&mi->list, &pending_items);
		else if (!min_timeout ||
			 time_before(mi->timeout, min_timeout))
			min_timeout = mi->timeout;
	}
	spin_unlock(&ctx->pending_list_lock);

	mapping_remove_and_free_list(ctx, &pending_items);

	if (min_timeout)
		schedule_delayed_work(&ctx->dwork, abs(min_timeout - now));
}

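/* On destroy, cancel the delayed work and reap whatever is still pending. */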
static void mapping_flush_work(struct mapping_ctx *ctx)
{
	if (!ctx->delayed_removal)
		return;

	cancel_delayed_work_sync(&ctx->dwork);
	mapping_remove_and_free_list(ctx, &ctx->pending_list);
}

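/*
 * Allocate a private mapping context for items of @data_size bytes with
 * IDs in [1, max_id] (UINT_MAX when @max_id is 0). The context starts
 * with one reference held by the caller.
 */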
struct mapping_ctx *
mapping_create(size_t data_size, u32 max_id, bool delayed_removal)
{
	struct mapping_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_id = max_id ? max_id : UINT_MAX;
	ctx->data_size = data_size;

	if (delayed_removal) {
		INIT_DELAYED_WORK(&ctx->dwork, mapping_work_handler);
		INIT_LIST_HEAD(&ctx->pending_list);
		spin_lock_init(&ctx->pending_list_lock);
		ctx->delayed_removal = true;
	}

	mutex_init(&ctx->lock);
	xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1);

	refcount_set(&ctx->refcount, 1);
	INIT_LIST_HEAD(&ctx->list);

	return ctx;
}

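/*
 * Get a mapping context shared under the (@id, @type) key: reuse a live
 * context from shared_ctx_list when possible, otherwise create one and
 * publish it on the list. If refcount_inc_not_zero() fails, the matching
 * context is mid-destruction, so a fresh one is created instead.
 */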
struct mapping_ctx *
mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id,
		      bool delayed_removal)
{
	struct mapping_ctx *ctx;

	mutex_lock(&shared_ctx_lock);
	list_for_each_entry(ctx, &shared_ctx_list, list) {
		if (ctx->id == id && ctx->type == type) {
			if (refcount_inc_not_zero(&ctx->refcount))
				goto unlock;
			break;
		}
	}

	ctx = mapping_create(data_size, max_id, delayed_removal);
	if (IS_ERR(ctx))
		goto unlock;

	ctx->id = id;
	ctx->type = type;
	list_add(&ctx->list, &shared_ctx_list);

unlock:
	mutex_unlock(&shared_ctx_lock);
	return ctx;
}

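/*
 * Drop a reference on @ctx; the last reference unlinks it from the
 * shared list, flushes pending removals and frees everything.
 */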
void mapping_destroy(struct mapping_ctx *ctx)
{
	if (!refcount_dec_and_test(&ctx->refcount))
		return;

	mutex_lock(&shared_ctx_lock);
	list_del(&ctx->list);
	mutex_unlock(&shared_ctx_lock);

	mapping_flush_work(ctx);
	xa_destroy(&ctx->xarray);
	mutex_destroy(&ctx->lock);

	kfree(ctx);
}
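
Usage sketch (not part of mapping.c): a minimal, hypothetical caller of the interface above. The struct tunnel_key type, its field values, and the function name are illustrative assumptions, not taken from the repository.

#include <linux/err.h>

#include "mapping.h"

/* Hypothetical fixed-size key; any struct of known size works. */
struct tunnel_key {
	u32 tun_id;
	u32 dst_ip;
};

static int mapping_usage_example(void)
{
	struct tunnel_key key = { .tun_id = 42, .dst_ip = 0x0a000001 };
	struct tunnel_key found;
	struct mapping_ctx *ctx;
	u32 id;
	int err;

	/* IDs in [1, 4095], freed lazily after the grace period. */
	ctx = mapping_create(sizeof(key), (1 << 12) - 1, true);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	err = mapping_add(ctx, &key, &id);		/* key -> id */
	if (!err) {
		err = mapping_find(ctx, id, &found);	/* id -> key copy */
		mapping_remove(ctx, id);		/* drop this reference */
	}

	mapping_destroy(ctx);
	return err;
}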