cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

user.c (6076B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * The "user cache".
      4 *
      5 * (C) Copyright 1991-2000 Linus Torvalds
      6 *
      7 * We have a per-user structure to keep track of how many
      8 * processes, files etc the user has claimed, in order to be
      9 * able to have per-user limits for system resources. 
     10 */
     11
     12#include <linux/init.h>
     13#include <linux/sched.h>
     14#include <linux/slab.h>
     15#include <linux/bitops.h>
     16#include <linux/key.h>
     17#include <linux/sched/user.h>
     18#include <linux/interrupt.h>
     19#include <linux/export.h>
     20#include <linux/user_namespace.h>
     21#include <linux/proc_ns.h>
     22
     23/*
     24 * userns count is 1 for root user, 1 for init_uts_ns,
     25 * and 1 for... ?
     26 */
     27struct user_namespace init_user_ns = {
     28	.uid_map = {
     29		.nr_extents = 1,
     30		{
     31			.extent[0] = {
     32				.first = 0,
     33				.lower_first = 0,
     34				.count = 4294967295U,
     35			},
     36		},
     37	},
     38	.gid_map = {
     39		.nr_extents = 1,
     40		{
     41			.extent[0] = {
     42				.first = 0,
     43				.lower_first = 0,
     44				.count = 4294967295U,
     45			},
     46		},
     47	},
     48	.projid_map = {
     49		.nr_extents = 1,
     50		{
     51			.extent[0] = {
     52				.first = 0,
     53				.lower_first = 0,
     54				.count = 4294967295U,
     55			},
     56		},
     57	},
     58	.ns.count = REFCOUNT_INIT(3),
     59	.owner = GLOBAL_ROOT_UID,
     60	.group = GLOBAL_ROOT_GID,
     61	.ns.inum = PROC_USER_INIT_INO,
     62#ifdef CONFIG_USER_NS
     63	.ns.ops = &userns_operations,
     64#endif
     65	.flags = USERNS_INIT_FLAGS,
     66#ifdef CONFIG_KEYS
     67	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
     68	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
     69#endif
     70};
     71EXPORT_SYMBOL_GPL(init_user_ns);
     72
     73/*
     74 * UID task count cache, to get fast user lookup in "alloc_uid"
     75 * when changing user ID's (ie setuid() and friends).
     76 */
     77
     78#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
     79#define UIDHASH_SZ	(1 << UIDHASH_BITS)
     80#define UIDHASH_MASK		(UIDHASH_SZ - 1)
     81#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
     82#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
     83
     84static struct kmem_cache *uid_cachep;
     85static struct hlist_head uidhash_table[UIDHASH_SZ];
     86
     87/*
     88 * The uidhash_lock is mostly taken from process context, but it is
     89 * occasionally also taken from softirq/tasklet context, when
     90 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
     91 * But free_uid() is also called with local interrupts disabled, and running
     92 * local_bh_enable() with local interrupts disabled is an error - we'll run
     93 * softirq callbacks, and they can unconditionally enable interrupts, and
     94 * the caller of free_uid() didn't expect that..
     95 */
     96static DEFINE_SPINLOCK(uidhash_lock);
     97
     98/* root_user.__count is 1, for init task cred */
     99struct user_struct root_user = {
    100	.__count	= REFCOUNT_INIT(1),
    101	.uid		= GLOBAL_ROOT_UID,
    102	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
    103};
    104
    105/*
    106 * These routines must be called with the uidhash spinlock held!
    107 */
    108static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
    109{
    110	hlist_add_head(&up->uidhash_node, hashent);
    111}
    112
    113static void uid_hash_remove(struct user_struct *up)
    114{
    115	hlist_del_init(&up->uidhash_node);
    116}
    117
    118static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
    119{
    120	struct user_struct *user;
    121
    122	hlist_for_each_entry(user, hashent, uidhash_node) {
    123		if (uid_eq(user->uid, uid)) {
    124			refcount_inc(&user->__count);
    125			return user;
    126		}
    127	}
    128
    129	return NULL;
    130}
    131
    132static int user_epoll_alloc(struct user_struct *up)
    133{
    134#ifdef CONFIG_EPOLL
    135	return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
    136#else
    137	return 0;
    138#endif
    139}
    140
    141static void user_epoll_free(struct user_struct *up)
    142{
    143#ifdef CONFIG_EPOLL
    144	percpu_counter_destroy(&up->epoll_watches);
    145#endif
    146}
    147
    148/* IRQs are disabled and uidhash_lock is held upon function entry.
    149 * IRQ state (as stored in flags) is restored and uidhash_lock released
    150 * upon function exit.
    151 */
    152static void free_user(struct user_struct *up, unsigned long flags)
    153	__releases(&uidhash_lock)
    154{
    155	uid_hash_remove(up);
    156	spin_unlock_irqrestore(&uidhash_lock, flags);
    157	user_epoll_free(up);
    158	kmem_cache_free(uid_cachep, up);
    159}
    160
    161/*
    162 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
    163 * caller must undo that ref with free_uid().
    164 *
    165 * If the user_struct could not be found, return NULL.
    166 */
    167struct user_struct *find_user(kuid_t uid)
    168{
    169	struct user_struct *ret;
    170	unsigned long flags;
    171
    172	spin_lock_irqsave(&uidhash_lock, flags);
    173	ret = uid_hash_find(uid, uidhashentry(uid));
    174	spin_unlock_irqrestore(&uidhash_lock, flags);
    175	return ret;
    176}
    177
    178void free_uid(struct user_struct *up)
    179{
    180	unsigned long flags;
    181
    182	if (!up)
    183		return;
    184
    185	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
    186		free_user(up, flags);
    187}
    188
    189struct user_struct *alloc_uid(kuid_t uid)
    190{
    191	struct hlist_head *hashent = uidhashentry(uid);
    192	struct user_struct *up, *new;
    193
    194	spin_lock_irq(&uidhash_lock);
    195	up = uid_hash_find(uid, hashent);
    196	spin_unlock_irq(&uidhash_lock);
    197
    198	if (!up) {
    199		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
    200		if (!new)
    201			return NULL;
    202
    203		new->uid = uid;
    204		refcount_set(&new->__count, 1);
    205		if (user_epoll_alloc(new)) {
    206			kmem_cache_free(uid_cachep, new);
    207			return NULL;
    208		}
    209		ratelimit_state_init(&new->ratelimit, HZ, 100);
    210		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
    211
    212		/*
    213		 * Before adding this, check whether we raced
    214		 * on adding the same user already..
    215		 */
    216		spin_lock_irq(&uidhash_lock);
    217		up = uid_hash_find(uid, hashent);
    218		if (up) {
    219			user_epoll_free(new);
    220			kmem_cache_free(uid_cachep, new);
    221		} else {
    222			uid_hash_insert(new, hashent);
    223			up = new;
    224		}
    225		spin_unlock_irq(&uidhash_lock);
    226	}
    227
    228	return up;
    229}
    230
    231static int __init uid_cache_init(void)
    232{
    233	int n;
    234
    235	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
    236			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
    237
    238	for(n = 0; n < UIDHASH_SZ; ++n)
    239		INIT_HLIST_HEAD(uidhash_table + n);
    240
    241	if (user_epoll_alloc(&root_user))
    242		panic("root_user epoll percpu counter alloc failed");
    243
    244	/* Insert the root user immediately (init already runs as root) */
    245	spin_lock_irq(&uidhash_lock);
    246	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
    247	spin_unlock_irq(&uidhash_lock);
    248
    249	return 0;
    250}
    251subsys_initcall(uid_cache_init);