cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ruleset.c (12404B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Ruleset management
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/compiler_types.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/overflow.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "limits.h"
#include "object.h"
#include "ruleset.h"

static struct landlock_ruleset *create_ruleset(const u32 num_layers)
{
	struct landlock_ruleset *new_ruleset;

	new_ruleset =
		kzalloc(struct_size(new_ruleset, fs_access_masks, num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_ruleset)
		return ERR_PTR(-ENOMEM);
	refcount_set(&new_ruleset->usage, 1);
	mutex_init(&new_ruleset->lock);
	new_ruleset->root = RB_ROOT;
	new_ruleset->num_layers = num_layers;
	/*
	 * hierarchy = NULL
	 * num_rules = 0
	 * fs_access_masks[] = 0
	 */
	return new_ruleset;
}

struct landlock_ruleset *
landlock_create_ruleset(const access_mask_t fs_access_mask)
{
	struct landlock_ruleset *new_ruleset;

	/* Informs about useless ruleset. */
	if (!fs_access_mask)
		return ERR_PTR(-ENOMSG);
	new_ruleset = create_ruleset(1);
	if (!IS_ERR(new_ruleset))
		new_ruleset->fs_access_masks[0] = fs_access_mask;
	return new_ruleset;
}
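/*
 * A minimal user-space sketch (not part of this file) of how the function
 * above is reached through the landlock_create_ruleset(2) syscall.  The
 * access mask is a hypothetical example; passing a zero handled_access_fs
 * makes the kernel return -ENOMSG, as implemented above (user space then
 * sees errno set to ENOMSG).
 */
#define _GNU_SOURCE
#include <linux/landlock.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_example_ruleset(void)
{
	const struct landlock_ruleset_attr attr = {
		/* Example: handle reads of regular files. */
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
	};

	/* Returns a new ruleset file descriptor, or -1 on error. */
	return syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);
}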
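/*
 * Compile-time sanity check: writing ~0 into a field stores the maximum
 * value the field can represent, so the BUILD_BUG_ON() below fails the
 * build if num_layers is too narrow to count up to LANDLOCK_MAX_NUM_LAYERS.
 * build_check_ruleset() and build_check_layer() below rely on the same
 * pattern.
 */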
static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}

static struct landlock_rule *
create_rule(struct landlock_object *const object,
	    const struct landlock_layer (*const layers)[], const u32 num_layers,
	    const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			   GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	landlock_get_object(object);
	new_rule->object = object;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
	       flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}

static void free_rule(struct landlock_rule *const rule)
{
	might_sleep();
	if (!rule)
		return;
	landlock_put_object(rule->object);
	kfree(rule);
}

static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};
	typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
}

/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @object: The object to build the new rule with.  The underlying kernel
 *          object must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       struct landlock_object *const object,
		       const struct landlock_layer (*const layers)[],
		       size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!object || !layers))
		return -ENOENT;
	walker_node = &(ruleset->root.rb_node);
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->object != object) {
			parent_node = *walker_node;
			if (this->object < object)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.
		 */
		new_rule = create_rule(object, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
		free_rule(this);
		return 0;
	}

	/* There is no match for @object. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(object, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, &ruleset->root);
	ruleset->num_rules++;
	return 0;
}
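/*
 * Illustrative, standalone sketch (not kernel code) of the layer semantics
 * documented above: within one layer, rules for the same object are OR-ed
 * together; across layers, a request must be granted by every layer, so the
 * effective access is the AND of all layer masks.  The mask values are made
 * up for the example; the real evaluation lives in the Landlock filesystem
 * hooks, not in this file.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Per-layer access masks recorded for a single object. */
	const uint16_t layer_access[] = { 0x7, 0x3, 0x1 };
	uint16_t effective = ~(uint16_t)0; /* start with all bits set */
	size_t i;

	for (i = 0; i < sizeof(layer_access) / sizeof(layer_access[0]); i++)
		effective &= layer_access[i];
	printf("effective access mask: %#x\n", effective); /* prints 0x1 */
	return 0;
}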

static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}

/* @ruleset must be locked by the caller. */
int landlock_insert_rule(struct landlock_ruleset *const ruleset,
			 struct landlock_object *const object,
			 const access_mask_t access)
{
	struct landlock_layer layers[] = { {
		.access = access,
		/* When @level is zero, insert_rule() extends @ruleset. */
		.level = 0,
	} };

	build_check_layer();
	return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers));
}

static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy)
{
	if (hierarchy)
		refcount_inc(&hierarchy->usage);
}

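/*
 * Drops one reference on @hierarchy and walks up the parent chain, freeing
 * every ancestor whose refcount also drops to zero.  Iterating instead of
 * recursing keeps stack usage constant for deeply nested domains.
 */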
static void put_hierarchy(struct landlock_hierarchy *hierarchy)
{
	while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) {
		const struct landlock_hierarchy *const freeme = hierarchy;

		hierarchy = hierarchy->parent;
		kfree(freeme);
	}
}

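/*
 * Locking note for the two helpers below: both rulesets share the same lock
 * class, so the inner lock is taken with
 * mutex_lock_nested(SINGLE_DEPTH_NESTING) to tell lockdep that the nesting
 * is intentional.  Taking the destination lock first is safe because the
 * destination is a freshly created domain that no other thread can reach
 * yet.
 */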
static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };

		if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
			err = -EINVAL;
			goto out_unlock;
		}
		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
			err = -EINVAL;
			goto out_unlock;
		}
		layers[0].access = walker_rule->layers[0].access;
		err = insert_rule(dst, walker_rule->object, &layers,
				  ARRAY_SIZE(layers));
		if (err)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}

static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     &parent->root, node) {
		err = insert_rule(child, walker_rule->object,
				  &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			goto out_unlock;
	}

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->fs_access_masks, parent->fs_access_masks,
	       flex_array_size(parent, fs_access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}

static void free_ruleset(struct landlock_ruleset *const ruleset)
{
	struct landlock_rule *freeme, *next;

	might_sleep();
	rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node)
		free_rule(freeme);
	put_hierarchy(ruleset->hierarchy);
	kfree(ruleset);
}

void landlock_put_ruleset(struct landlock_ruleset *const ruleset)
{
	might_sleep();
	if (ruleset && refcount_dec_and_test(&ruleset->usage))
		free_ruleset(ruleset);
}

static void free_ruleset_work(struct work_struct *const work)
{
	struct landlock_ruleset *ruleset;

	ruleset = container_of(work, struct landlock_ruleset, work_free);
	free_ruleset(ruleset);
}

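/*
 * Non-sleepable counterpart of landlock_put_ruleset(): freeing a ruleset
 * may sleep (see the might_sleep() calls above), so the actual release is
 * deferred to the system workqueue.
 */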
void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset)
{
	if (ruleset && refcount_dec_and_test(&ruleset->usage)) {
		INIT_WORK(&ruleset->work_free, free_ruleset_work);
		schedule_work(&ruleset->work_free);
	}
}

/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;
	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy) {
		err = -ENOMEM;
		goto out_put_dom;
	}
	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		goto out_put_dom;

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		goto out_put_dom;

	return new_dom;

out_put_dom:
	landlock_put_ruleset(new_dom);
	return ERR_PTR(err);
}
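/*
 * A minimal user-space sketch (not part of this file) of the whole path
 * that ends in landlock_merge_ruleset(): build a ruleset, add one rule,
 * then enforce it.  Each successful landlock_restrict_self(2) call stacks
 * one more layer onto the calling task's domain through the merge above.
 * The helper name and the access masks are hypothetical example values.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: allow only read access beneath @dir. */
static int restrict_to_read_only(const char *const dir)
{
	const struct landlock_ruleset_attr attr = {
		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
				     LANDLOCK_ACCESS_FS_READ_DIR,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
				  LANDLOCK_ACCESS_FS_READ_DIR,
	};
	int ruleset_fd;
	long err;

	ruleset_fd = syscall(SYS_landlock_create_ruleset, &attr, sizeof(attr), 0);
	if (ruleset_fd < 0)
		return -1;
	path_beneath.parent_fd = open(dir, O_PATH | O_CLOEXEC);
	if (path_beneath.parent_fd < 0)
		goto err_close;
	err = syscall(SYS_landlock_add_rule, ruleset_fd,
		      LANDLOCK_RULE_PATH_BENEATH, &path_beneath, 0);
	close(path_beneath.parent_fd);
	if (err)
		goto err_close;
	/* Mandatory before landlock_restrict_self(2). */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		goto err_close;
	err = syscall(SYS_landlock_restrict_self, ruleset_fd, 0);
	close(ruleset_fd);
	return err ? -1 : 0;

err_close:
	close(ruleset_fd);
	return -1;
}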

/*
 * The returned access has the same lifetime as @ruleset.
 */
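/*
 * Rules are keyed by the address of their &struct landlock_object, so the
 * walk below is a pointer-comparison binary search: O(log n) in the number
 * of rules.
 */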
const struct landlock_rule *
landlock_find_rule(const struct landlock_ruleset *const ruleset,
		   const struct landlock_object *const object)
{
	const struct rb_node *node;

	if (!object)
		return NULL;
	node = ruleset->root.rb_node;
	while (node) {
		struct landlock_rule *this =
			rb_entry(node, struct landlock_rule, node);

		if (this->object == object)
			return this;
		if (this->object < object)
			node = node->rb_right;
		else
			node = node->rb_left;
	}
	return NULL;
}