cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

watch_queue.c (16754B)


// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
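/* With 4 KiB pages this works out to 4096 / 128 = 32 note slots per page. */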

static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
                                         struct pipe_buffer *buf)
{
        struct watch_queue *wqueue = (struct watch_queue *)buf->private;
        struct page *page;
        unsigned int bit;

        /* We need to work out which note within the page this refers to, but
         * the note might have been maximum size, so merely ANDing the offset
         * off doesn't work.  OTOH, the note must've been more than zero size.
         */
        bit = buf->offset + buf->len;
        if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
                bit -= WATCH_QUEUE_NOTE_SIZE;
        bit /= WATCH_QUEUE_NOTE_SIZE;

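        /* page->index was set at allocation time (see watch_queue_set_size())
         * to the number of the first note slot in this page, so adding it
         * turns the in-page slot number into a bitmap index.
         */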
        page = buf->page;
        bit += page->index;

        set_bit(bit, wqueue->notes_bitmap);
        generic_pipe_buf_release(pipe, buf);
}

// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
        .release        = watch_queue_pipe_buf_release,
        .try_steal      = watch_queue_pipe_buf_try_steal,
        .get            = generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 */
static bool post_one_notification(struct watch_queue *wqueue,
                                  struct watch_notification *n)
{
        void *p;
        struct pipe_inode_info *pipe = wqueue->pipe;
        struct pipe_buffer *buf;
        struct page *page;
        unsigned int head, tail, mask, note, offset, len;
        bool done = false;

        if (!pipe)
                return false;

        spin_lock_irq(&pipe->rd_wait.lock);

        if (wqueue->defunct)
                goto out;

        mask = pipe->ring_size - 1;
        head = pipe->head;
        tail = pipe->tail;
        if (pipe_full(head, tail, pipe->ring_size))
                goto lost;

        note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
        if (note >= wqueue->nr_notes)
                goto lost;

        page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
        offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE;
        get_page(page);
        len = n->info & WATCH_INFO_LENGTH;
        p = kmap_atomic(page);
        memcpy(p + offset, n, len);
        kunmap_atomic(p);

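        /* Publish the copied note as the next pipe buffer.  PIPE_BUF_FLAG_WHOLE
         * means a reader must consume the note in a single read rather than
         * taking it piecemeal.
         */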
        buf = &pipe->bufs[head & mask];
        buf->page = page;
        buf->private = (unsigned long)wqueue;
        buf->ops = &watch_queue_pipe_buf_ops;
        buf->offset = offset;
        buf->len = len;
        buf->flags = PIPE_BUF_FLAG_WHOLE;
        smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

        if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
                spin_unlock_irq(&pipe->rd_wait.lock);
                BUG();
        }
        wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
        done = true;

out:
        spin_unlock_irq(&pipe->rd_wait.lock);
        if (done)
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        return done;

lost:
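        /* The pipe ring was full or the note pool was exhausted: flag the
         * most recently queued buffer so the reader can tell that
         * notifications were lost.
         */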
        buf = &pipe->bufs[(head - 1) & mask];
        buf->flags |= PIPE_BUF_FLAG_LOSS;
        goto out;
}

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
                                      const struct watch_notification *n)
{
        const struct watch_type_filter *wt;
        unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
        unsigned int st_index = n->subtype / st_bits;
        unsigned int st_bit = 1U << (n->subtype % st_bits);
        int i;

        if (!test_bit(n->type, wf->type_filter))
                return false;

        for (i = 0; i < wf->nr_filters; i++) {
                wt = &wf->filters[i];
                if (n->type == wt->type &&
                    (wt->subtype_filter[st_index] & st_bit) &&
                    (n->info & wt->info_mask) == wt->info_filter)
                        return true;
        }

        return false; /* If there is a filter, the default is to reject. */
}

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH and
 * should be in units of sizeof(*n).
 */
void __post_watch_notification(struct watch_list *wlist,
                               struct watch_notification *n,
                               const struct cred *cred,
                               u64 id)
{
        const struct watch_filter *wf;
        struct watch_queue *wqueue;
        struct watch *watch;

        if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
                WARN_ON(1);
                return;
        }

        rcu_read_lock();

        hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
                if (watch->id != id)
                        continue;
                n->info &= ~WATCH_INFO_ID;
                n->info |= watch->info_id;

                wqueue = rcu_dereference(watch->queue);
                wf = rcu_dereference(wqueue->filter);
                if (wf && !filter_watch_notification(wf, n))
                        continue;

                if (security_post_notification(watch->cred, cred, n) < 0)
                        continue;

                post_one_notification(wqueue, n);
        }

        rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);

/*
 * Allocate sufficient pages to preallocate space for the requested number of
 * notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
        struct watch_queue *wqueue = pipe->watch_queue;
        struct page **pages;
        unsigned long *bitmap;
        unsigned long user_bufs;
        int ret, i, nr_pages;

        if (!wqueue)
                return -ENODEV;
        if (wqueue->notes)
                return -EBUSY;

        if (nr_notes < 1 ||
            nr_notes > 512) /* TODO: choose a better hard limit */
                return -EINVAL;

        nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
        nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
        user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

        if (nr_pages > pipe->max_usage &&
            (too_many_pipe_buffers_hard(user_bufs) ||
             too_many_pipe_buffers_soft(user_bufs)) &&
            pipe_is_unprivileged_user()) {
                ret = -EPERM;
                goto error;
        }

        nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
        ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
        if (ret < 0)
                goto error;

        ret = -ENOMEM;  /* otherwise an allocation failure below returns 0 */
        pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL);
        if (!pages)
                goto error;

        for (i = 0; i < nr_pages; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto error_p;
                pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
        }

        bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
        if (!bitmap)
                goto error_p;

        bitmap_fill(bitmap, nr_notes);
        wqueue->notes = pages;
        wqueue->notes_bitmap = bitmap;
        wqueue->nr_pages = nr_pages;
        wqueue->nr_notes = nr_notes;
        return 0;

error_p:
        while (--i >= 0)
                __free_page(pages[i]);
        kfree(pages);
error:
        (void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
        return ret;
}
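
/*
 * Worked example for the sizing above: a request for 100 notes with 4 KiB
 * pages gives nr_pages = DIV_ROUND_UP(100, 32) = 4, nr_notes is then rounded
 * up to 4 * 32 = 128 slots, and the pipe ring is resized to
 * roundup_pow_of_two(128) = 128 buffers.
 */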

/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
                            struct watch_notification_filter __user *_filter)
{
        struct watch_notification_type_filter *tf;
        struct watch_notification_filter filter;
        struct watch_type_filter *q;
        struct watch_filter *wfilter;
        struct watch_queue *wqueue = pipe->watch_queue;
        int ret, nr_filter = 0, i;

        if (!wqueue)
                return -ENODEV;

        if (!_filter) {
                /* Remove the old filter */
                wfilter = NULL;
                goto set;
        }

        /* Grab the user's filter specification */
        if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
                return -EFAULT;
        if (filter.nr_filters == 0 ||
            filter.nr_filters > 16 ||
            filter.__reserved != 0)
                return -EINVAL;

        tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
        if (IS_ERR(tf))
                return PTR_ERR(tf);

        ret = -EINVAL;
        for (i = 0; i < filter.nr_filters; i++) {
                if ((tf[i].info_filter & ~tf[i].info_mask) ||
                    tf[i].info_mask & WATCH_INFO_LENGTH)
                        goto err_filter;
                /* Ignore any unknown types */
                if (tf[i].type >= WATCH_TYPE__NR)
                        continue;
                nr_filter++;
        }

        /* Now we need to build the internal filter from only the relevant
         * user-specified filters.
         */
        ret = -ENOMEM;
        wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
        if (!wfilter)
                goto err_filter;
        wfilter->nr_filters = nr_filter;

        q = wfilter->filters;
        for (i = 0; i < filter.nr_filters; i++) {
                if (tf[i].type >= WATCH_TYPE__NR)
                        continue;

                q->type                 = tf[i].type;
                q->info_filter          = tf[i].info_filter;
                q->info_mask            = tf[i].info_mask;
                q->subtype_filter[0]    = tf[i].subtype_filter[0];
                __set_bit(q->type, wfilter->type_filter);
                q++;
        }

        kfree(tf);
set:
        pipe_lock(pipe);
        wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
                                      lockdep_is_held(&pipe->mutex));
        pipe_unlock(pipe);
        if (wfilter)
                kfree_rcu(wfilter, rcu);
        return 0;

err_filter:
        kfree(tf);
        return ret;
}

static void __put_watch_queue(struct kref *kref)
{
        struct watch_queue *wqueue =
                container_of(kref, struct watch_queue, usage);
        struct watch_filter *wfilter;
        int i;

        for (i = 0; i < wqueue->nr_pages; i++)
                __free_page(wqueue->notes[i]);
        kfree(wqueue->notes);
        bitmap_free(wqueue->notes_bitmap);

        wfilter = rcu_access_pointer(wqueue->filter);
        if (wfilter)
                kfree_rcu(wfilter, rcu);
        kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
        kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
        struct watch *watch = container_of(rcu, struct watch, rcu);

        put_watch_queue(rcu_access_pointer(watch->queue));
        atomic_dec(&watch->cred->user->nr_watches);
        put_cred(watch->cred);
        kfree(watch);
}

static void __put_watch(struct kref *kref)
{
        struct watch *watch = container_of(kref, struct watch, usage);

        call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
        kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
        kref_init(&watch->usage);
        INIT_HLIST_NODE(&watch->list_node);
        INIT_HLIST_NODE(&watch->queue_node);
        rcu_assign_pointer(watch->queue, wqueue);
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post notifications
 * to and the watch list of the object to be watched.  @watch->cred must also
 * have been set to the appropriate credentials and a ref taken on them.
 *
 * The caller must pin both the queue and the list, and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
        struct watch_queue *wqueue = rcu_access_pointer(watch->queue);
        struct watch *w;

        hlist_for_each_entry(w, &wlist->watchers, list_node) {
                struct watch_queue *wq = rcu_access_pointer(w->queue);
                if (wqueue == wq && watch->id == w->id)
                        return -EBUSY;
        }

        watch->cred = get_current_cred();
        rcu_assign_pointer(watch->watch_list, wlist);

        if (atomic_inc_return(&watch->cred->user->nr_watches) >
            task_rlimit(current, RLIMIT_NOFILE)) {
                atomic_dec(&watch->cred->user->nr_watches);
                put_cred(watch->cred);
                return -EAGAIN;
        }

        spin_lock_bh(&wqueue->lock);
        kref_get(&wqueue->usage);
        kref_get(&watch->usage);
        hlist_add_head(&watch->queue_node, &wqueue->watches);
        spin_unlock_bh(&wqueue->lock);

        hlist_add_head(&watch->list_node, &wlist->watchers);
        return 0;
}
EXPORT_SYMBOL(add_watch_to_object);
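
/*
 * Illustrative sketch (not part of the original file): a kernel-side user of
 * this API pairs the helpers above roughly the way keyctl(KEYCTL_WATCH_KEY)
 * does.  The object names and locking here are hypothetical; the caller must
 * hold the watch list locked, per the comment above:
 *
 *      wqueue = get_watch_queue(pipe_fd);
 *      watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *      init_watch(watch, wqueue);
 *      watch->id = object_id;  // compared against __post_watch_notification()'s id
 *      spin_lock(&object->watch_list.lock);
 *      ret = add_watch_to_object(watch, &object->watch_list);
 *      spin_unlock(&object->watch_list.lock);
 *      put_watch_queue(wqueue);        // add_watch_to_object() took its own ref
 */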

/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object.  A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
                             u64 id, bool all)
{
        struct watch_notification_removal n;
        struct watch_queue *wqueue;
        struct watch *watch;
        int ret = -EBADSLT;

        rcu_read_lock();

again:
        spin_lock(&wlist->lock);
        hlist_for_each_entry(watch, &wlist->watchers, list_node) {
                if (all ||
                    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
                        goto found;
        }
        spin_unlock(&wlist->lock);
        goto out;

found:
        ret = 0;
        hlist_del_init_rcu(&watch->list_node);
        rcu_assign_pointer(watch->watch_list, NULL);
        spin_unlock(&wlist->lock);

        /* We now own the reference on watch that used to belong to wlist. */

        n.watch.type = WATCH_TYPE_META;
        n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
        n.watch.info = watch->info_id | watch_sizeof(n.watch);
        n.id = id;
        if (id != 0)
                n.watch.info = watch->info_id | watch_sizeof(n);

        wqueue = rcu_dereference(watch->queue);

        /* We don't need the watch list lock for the next bit as RCU is
         * protecting *wqueue from deallocation.
         */
        if (wqueue) {
                post_one_notification(wqueue, &n.watch);

                spin_lock_bh(&wqueue->lock);

                if (!hlist_unhashed(&watch->queue_node)) {
                        hlist_del_init_rcu(&watch->queue_node);
                        put_watch(watch);
                }

                spin_unlock_bh(&wqueue->lock);
        }

        if (wlist->release_watch) {
                void (*release_watch)(struct watch *);

                release_watch = wlist->release_watch;
                rcu_read_unlock();
                (*release_watch)(watch);
                rcu_read_lock();
        }
        put_watch(watch);

        if (all && !hlist_empty(&wlist->watchers))
                goto again;
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);

/*
 * Remove all the watches that are contributory to a queue.  This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
        struct watch_list *wlist;
        struct watch *watch;
        bool release;

        rcu_read_lock();
        spin_lock_bh(&wqueue->lock);

        /* Prevent new notifications from being stored. */
        wqueue->defunct = true;

        while (!hlist_empty(&wqueue->watches)) {
                watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
                hlist_del_init_rcu(&watch->queue_node);
                /* We now own a ref on the watch. */
                spin_unlock_bh(&wqueue->lock);

                /* We can't do the next bit under the queue lock as we need to
                 * get the list lock - which would cause a deadlock if someone
                 * was removing from the opposite direction at the same time or
                 * posting a notification.
                 */
                wlist = rcu_dereference(watch->watch_list);
                if (wlist) {
                        void (*release_watch)(struct watch *);

                        spin_lock(&wlist->lock);

                        release = !hlist_unhashed(&watch->list_node);
                        if (release) {
                                hlist_del_init_rcu(&watch->list_node);
                                rcu_assign_pointer(watch->watch_list, NULL);

                                /* We now own a second ref on the watch. */
                        }

                        release_watch = wlist->release_watch;
                        spin_unlock(&wlist->lock);

                        if (release) {
                                if (release_watch) {
                                        rcu_read_unlock();
                                        /* This might need to call dput(), so
                                         * we have to drop all the locks.
                                         */
                                        (*release_watch)(watch);
                                        rcu_read_lock();
                                }
                                put_watch(watch);
                        }
                }

                put_watch(watch);
                spin_lock_bh(&wqueue->lock);
        }

        spin_unlock_bh(&wqueue->lock);
        rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
        struct pipe_inode_info *pipe;
        struct watch_queue *wqueue = ERR_PTR(-EINVAL);
        struct fd f;

        f = fdget(fd);
        if (f.file) {
                pipe = get_pipe_info(f.file, false);
                if (pipe && pipe->watch_queue) {
                        wqueue = pipe->watch_queue;
                        kref_get(&wqueue->usage);
                }
                fdput(f);
        }

        return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
        struct watch_queue *wqueue;

        wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
        if (!wqueue)
                return -ENOMEM;

        wqueue->pipe = pipe;
        kref_init(&wqueue->usage);
        spin_lock_init(&wqueue->lock);
        INIT_HLIST_HEAD(&wqueue->watches);

        pipe->watch_queue = wqueue;
        return 0;
}
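
For context: userspace drives the other side of this file entirely through the
pipe fd. Below is a minimal sketch in the style of Documentation/watch_queue.rst
and samples/watch_queue/watch_test.c; the queue size of 128 notes is an
arbitrary choice and error handling is trimmed.

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/watch_queue.h>

        int main(void)
        {
                int fds[2];
                char buf[4096];
                ssize_t len;

                /* O_NOTIFICATION_PIPE makes pipe2() set up a watch queue
                 * (watch_queue_init() above). */
                if (pipe2(fds, O_NOTIFICATION_PIPE | O_CLOEXEC) == -1)
                        return 1;

                /* Preallocate note slots: watch_queue_set_size() above. */
                if (ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 128) == -1)
                        return 1;

                /* A notification source must now be attached to fds[0],
                 * e.g. with keyctl(KEYCTL_WATCH_KEY, key, fds[0], 0x01). */

                /* Each read() returns whole notes; n->info carries the byte
                 * length of each record (see post_one_notification()). */
                while ((len = read(fds[0], buf, sizeof(buf))) > 0) {
                        char *p = buf;

                        while (p < buf + len) {
                                struct watch_notification *n = (void *)p;

                                printf("type=%u subtype=%u\n", n->type, n->subtype);
                                p += (n->info & WATCH_INFO_LENGTH) >>
                                        WATCH_INFO_LENGTH__SHIFT;
                        }
                }
                return 0;
        }

A filter can also be installed with ioctl(fds[0], IOC_WATCH_QUEUE_SET_FILTER,
&filter), where filter is a struct watch_notification_filter; this lands in
watch_queue_set_filter() above.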