cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

userfaultfd.c (55772B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *  fs/userfaultfd.c
      4 *
      5 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
      6 *  Copyright (C) 2008-2009 Red Hat, Inc.
      7 *  Copyright (C) 2015  Red Hat, Inc.
      8 *
      9 *  Some part derived from fs/eventfd.c (anon inode setup) and
     10 *  mm/ksm.c (mm hashing).
     11 */
     12
     13#include <linux/list.h>
     14#include <linux/hashtable.h>
     15#include <linux/sched/signal.h>
     16#include <linux/sched/mm.h>
     17#include <linux/mm.h>
     18#include <linux/mm_inline.h>
     19#include <linux/mmu_notifier.h>
     20#include <linux/poll.h>
     21#include <linux/slab.h>
     22#include <linux/seq_file.h>
     23#include <linux/file.h>
     24#include <linux/bug.h>
     25#include <linux/anon_inodes.h>
     26#include <linux/syscalls.h>
     27#include <linux/userfaultfd_k.h>
     28#include <linux/mempolicy.h>
     29#include <linux/ioctl.h>
     30#include <linux/security.h>
     31#include <linux/hugetlb.h>
     32#include <linux/swapops.h>
     33
     34int sysctl_unprivileged_userfaultfd __read_mostly;
     35
     36static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
     37
     38/*
     39 * Start with fault_pending_wqh and fault_wqh so they're more likely
     40 * to be in the same cacheline.
     41 *
     42 * Locking order:
     43 *	fd_wqh.lock
     44 *		fault_pending_wqh.lock
     45 *			fault_wqh.lock
     46 *		event_wqh.lock
     47 *
     48 * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
     49 * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
     50 * also taken in IRQ context.
     51 */
     52struct userfaultfd_ctx {
     53	/* waitqueue head for the pending (i.e. not read) userfaults */
     54	wait_queue_head_t fault_pending_wqh;
     55	/* waitqueue head for the userfaults */
     56	wait_queue_head_t fault_wqh;
     57	/* waitqueue head for the pseudo fd to wakeup poll/read */
     58	wait_queue_head_t fd_wqh;
     59	/* waitqueue head for events */
     60	wait_queue_head_t event_wqh;
     61	/* a refile sequence protected by fault_pending_wqh lock */
     62	seqcount_spinlock_t refile_seq;
     63	/* pseudo fd refcounting */
     64	refcount_t refcount;
     65	/* userfaultfd syscall flags */
     66	unsigned int flags;
     67	/* features requested from the userspace */
     68	unsigned int features;
     69	/* released */
     70	bool released;
     71	/* memory mappings are changing because of non-cooperative event */
     72	atomic_t mmap_changing;
     73	/* mm with one ore more vmas attached to this userfaultfd_ctx */
     74	struct mm_struct *mm;
     75};
     76
     77struct userfaultfd_fork_ctx {
     78	struct userfaultfd_ctx *orig;
     79	struct userfaultfd_ctx *new;
     80	struct list_head list;
     81};
     82
     83struct userfaultfd_unmap_ctx {
     84	struct userfaultfd_ctx *ctx;
     85	unsigned long start;
     86	unsigned long end;
     87	struct list_head list;
     88};
     89
     90struct userfaultfd_wait_queue {
     91	struct uffd_msg msg;
     92	wait_queue_entry_t wq;
     93	struct userfaultfd_ctx *ctx;
     94	bool waken;
     95};
     96
     97struct userfaultfd_wake_range {
     98	unsigned long start;
     99	unsigned long len;
    100};
    101
    102/* internal indication that UFFD_API ioctl was successfully executed */
    103#define UFFD_FEATURE_INITIALIZED		(1u << 31)
    104
    105static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
    106{
    107	return ctx->features & UFFD_FEATURE_INITIALIZED;
    108}
    109
    110static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
    111				     int wake_flags, void *key)
    112{
    113	struct userfaultfd_wake_range *range = key;
    114	int ret;
    115	struct userfaultfd_wait_queue *uwq;
    116	unsigned long start, len;
    117
    118	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
    119	ret = 0;
    120	/* len == 0 means wake all */
    121	start = range->start;
    122	len = range->len;
    123	if (len && (start > uwq->msg.arg.pagefault.address ||
    124		    start + len <= uwq->msg.arg.pagefault.address))
    125		goto out;
    126	WRITE_ONCE(uwq->waken, true);
    127	/*
    128	 * The Program-Order guarantees provided by the scheduler
    129	 * ensure uwq->waken is visible before the task is woken.
    130	 */
    131	ret = wake_up_state(wq->private, mode);
    132	if (ret) {
    133		/*
    134		 * Wake only once, autoremove behavior.
    135		 *
    136		 * After the effect of list_del_init is visible to the other
    137		 * CPUs, the waitqueue may disappear from under us, see the
    138		 * !list_empty_careful() in handle_userfault().
    139		 *
    140		 * try_to_wake_up() has an implicit smp_mb(), and the
    141		 * wq->private is read before calling the extern function
    142		 * "wake_up_state" (which in turns calls try_to_wake_up).
    143		 */
    144		list_del_init(&wq->entry);
    145	}
    146out:
    147	return ret;
    148}
    149
    150/**
    151 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
    152 * context.
    153 * @ctx: [in] Pointer to the userfaultfd context.
    154 */
    155static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
    156{
    157	refcount_inc(&ctx->refcount);
    158}
    159
    160/**
    161 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
    162 * context.
    163 * @ctx: [in] Pointer to userfaultfd context.
    164 *
    165 * The userfaultfd context reference must have been previously acquired either
    166 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
    167 */
    168static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
    169{
    170	if (refcount_dec_and_test(&ctx->refcount)) {
    171		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
    172		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
    173		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
    174		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
    175		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
    176		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
    177		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
    178		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
    179		mmdrop(ctx->mm);
    180		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
    181	}
    182}
    183
    184static inline void msg_init(struct uffd_msg *msg)
    185{
    186	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
    187	/*
    188	 * Must use memset to zero out the paddings or kernel data is
    189	 * leaked to userland.
    190	 */
    191	memset(msg, 0, sizeof(struct uffd_msg));
    192}
    193
    194static inline struct uffd_msg userfault_msg(unsigned long address,
    195					    unsigned int flags,
    196					    unsigned long reason,
    197					    unsigned int features)
    198{
    199	struct uffd_msg msg;
    200	msg_init(&msg);
    201	msg.event = UFFD_EVENT_PAGEFAULT;
    202
    203	if (!(features & UFFD_FEATURE_EXACT_ADDRESS))
    204		address &= PAGE_MASK;
    205	msg.arg.pagefault.address = address;
    206	/*
    207	 * These flags indicate why the userfault occurred:
    208	 * - UFFD_PAGEFAULT_FLAG_WP indicates a write protect fault.
    209	 * - UFFD_PAGEFAULT_FLAG_MINOR indicates a minor fault.
    210	 * - Neither of these flags being set indicates a MISSING fault.
    211	 *
    212	 * Separately, UFFD_PAGEFAULT_FLAG_WRITE indicates it was a write
    213	 * fault. Otherwise, it was a read fault.
    214	 */
    215	if (flags & FAULT_FLAG_WRITE)
    216		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
    217	if (reason & VM_UFFD_WP)
    218		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
    219	if (reason & VM_UFFD_MINOR)
    220		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_MINOR;
    221	if (features & UFFD_FEATURE_THREAD_ID)
    222		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
    223	return msg;
    224}
    225
    226#ifdef CONFIG_HUGETLB_PAGE
    227/*
    228 * Same functionality as userfaultfd_must_wait below with modifications for
    229 * hugepmd ranges.
    230 */
    231static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
    232					 struct vm_area_struct *vma,
    233					 unsigned long address,
    234					 unsigned long flags,
    235					 unsigned long reason)
    236{
    237	struct mm_struct *mm = ctx->mm;
    238	pte_t *ptep, pte;
    239	bool ret = true;
    240
    241	mmap_assert_locked(mm);
    242
    243	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
    244
    245	if (!ptep)
    246		goto out;
    247
    248	ret = false;
    249	pte = huge_ptep_get(ptep);
    250
    251	/*
    252	 * Lockless access: we're in a wait_event so it's ok if it
    253	 * changes under us.  PTE markers should be handled the same as none
    254	 * ptes here.
    255	 */
    256	if (huge_pte_none_mostly(pte))
    257		ret = true;
    258	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
    259		ret = true;
    260out:
    261	return ret;
    262}
    263#else
    264static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
    265					 struct vm_area_struct *vma,
    266					 unsigned long address,
    267					 unsigned long flags,
    268					 unsigned long reason)
    269{
    270	return false;	/* should never get here */
    271}
    272#endif /* CONFIG_HUGETLB_PAGE */
    273
    274/*
    275 * Verify the pagetables are still not ok after having reigstered into
    276 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
    277 * userfault that has already been resolved, if userfaultfd_read and
    278 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
    279 * threads.
    280 */
    281static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
    282					 unsigned long address,
    283					 unsigned long flags,
    284					 unsigned long reason)
    285{
    286	struct mm_struct *mm = ctx->mm;
    287	pgd_t *pgd;
    288	p4d_t *p4d;
    289	pud_t *pud;
    290	pmd_t *pmd, _pmd;
    291	pte_t *pte;
    292	bool ret = true;
    293
    294	mmap_assert_locked(mm);
    295
    296	pgd = pgd_offset(mm, address);
    297	if (!pgd_present(*pgd))
    298		goto out;
    299	p4d = p4d_offset(pgd, address);
    300	if (!p4d_present(*p4d))
    301		goto out;
    302	pud = pud_offset(p4d, address);
    303	if (!pud_present(*pud))
    304		goto out;
    305	pmd = pmd_offset(pud, address);
    306	/*
    307	 * READ_ONCE must function as a barrier with narrower scope
    308	 * and it must be equivalent to:
    309	 *	_pmd = *pmd; barrier();
    310	 *
    311	 * This is to deal with the instability (as in
    312	 * pmd_trans_unstable) of the pmd.
    313	 */
    314	_pmd = READ_ONCE(*pmd);
    315	if (pmd_none(_pmd))
    316		goto out;
    317
    318	ret = false;
    319	if (!pmd_present(_pmd))
    320		goto out;
    321
    322	if (pmd_trans_huge(_pmd)) {
    323		if (!pmd_write(_pmd) && (reason & VM_UFFD_WP))
    324			ret = true;
    325		goto out;
    326	}
    327
    328	/*
    329	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
    330	 * and use the standard pte_offset_map() instead of parsing _pmd.
    331	 */
    332	pte = pte_offset_map(pmd, address);
    333	/*
    334	 * Lockless access: we're in a wait_event so it's ok if it
    335	 * changes under us.  PTE markers should be handled the same as none
    336	 * ptes here.
    337	 */
    338	if (pte_none_mostly(*pte))
    339		ret = true;
    340	if (!pte_write(*pte) && (reason & VM_UFFD_WP))
    341		ret = true;
    342	pte_unmap(pte);
    343
    344out:
    345	return ret;
    346}
    347
    348static inline unsigned int userfaultfd_get_blocking_state(unsigned int flags)
    349{
    350	if (flags & FAULT_FLAG_INTERRUPTIBLE)
    351		return TASK_INTERRUPTIBLE;
    352
    353	if (flags & FAULT_FLAG_KILLABLE)
    354		return TASK_KILLABLE;
    355
    356	return TASK_UNINTERRUPTIBLE;
    357}
    358
    359/*
    360 * The locking rules involved in returning VM_FAULT_RETRY depending on
    361 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
    362 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
    363 * recommendation in __lock_page_or_retry is not an understatement.
    364 *
    365 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
    366 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
    367 * not set.
    368 *
    369 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
    370 * set, VM_FAULT_RETRY can still be returned if and only if there are
    371 * fatal_signal_pending()s, and the mmap_lock must be released before
    372 * returning it.
    373 */
    374vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
    375{
    376	struct mm_struct *mm = vmf->vma->vm_mm;
    377	struct userfaultfd_ctx *ctx;
    378	struct userfaultfd_wait_queue uwq;
    379	vm_fault_t ret = VM_FAULT_SIGBUS;
    380	bool must_wait;
    381	unsigned int blocking_state;
    382
    383	/*
    384	 * We don't do userfault handling for the final child pid update.
    385	 *
    386	 * We also don't do userfault handling during
    387	 * coredumping. hugetlbfs has the special
    388	 * follow_hugetlb_page() to skip missing pages in the
    389	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
    390	 * the no_page_table() helper in follow_page_mask(), but the
    391	 * shmem_vm_ops->fault method is invoked even during
    392	 * coredumping without mmap_lock and it ends up here.
    393	 */
    394	if (current->flags & (PF_EXITING|PF_DUMPCORE))
    395		goto out;
    396
    397	/*
    398	 * Coredumping runs without mmap_lock so we can only check that
    399	 * the mmap_lock is held, if PF_DUMPCORE was not set.
    400	 */
    401	mmap_assert_locked(mm);
    402
    403	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
    404	if (!ctx)
    405		goto out;
    406
    407	BUG_ON(ctx->mm != mm);
    408
    409	/* Any unrecognized flag is a bug. */
    410	VM_BUG_ON(reason & ~__VM_UFFD_FLAGS);
    411	/* 0 or > 1 flags set is a bug; we expect exactly 1. */
    412	VM_BUG_ON(!reason || (reason & (reason - 1)));
    413
    414	if (ctx->features & UFFD_FEATURE_SIGBUS)
    415		goto out;
    416	if ((vmf->flags & FAULT_FLAG_USER) == 0 &&
    417	    ctx->flags & UFFD_USER_MODE_ONLY) {
    418		printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
    419			"sysctl knob to 1 if kernel faults must be handled "
    420			"without obtaining CAP_SYS_PTRACE capability\n");
    421		goto out;
    422	}
    423
    424	/*
    425	 * If it's already released don't get it. This avoids to loop
    426	 * in __get_user_pages if userfaultfd_release waits on the
    427	 * caller of handle_userfault to release the mmap_lock.
    428	 */
    429	if (unlikely(READ_ONCE(ctx->released))) {
    430		/*
    431		 * Don't return VM_FAULT_SIGBUS in this case, so a non
    432		 * cooperative manager can close the uffd after the
    433		 * last UFFDIO_COPY, without risking to trigger an
    434		 * involuntary SIGBUS if the process was starting the
    435		 * userfaultfd while the userfaultfd was still armed
    436		 * (but after the last UFFDIO_COPY). If the uffd
    437		 * wasn't already closed when the userfault reached
    438		 * this point, that would normally be solved by
    439		 * userfaultfd_must_wait returning 'false'.
    440		 *
    441		 * If we were to return VM_FAULT_SIGBUS here, the non
    442		 * cooperative manager would be instead forced to
    443		 * always call UFFDIO_UNREGISTER before it can safely
    444		 * close the uffd.
    445		 */
    446		ret = VM_FAULT_NOPAGE;
    447		goto out;
    448	}
    449
    450	/*
    451	 * Check that we can return VM_FAULT_RETRY.
    452	 *
    453	 * NOTE: it should become possible to return VM_FAULT_RETRY
    454	 * even if FAULT_FLAG_TRIED is set without leading to gup()
    455	 * -EBUSY failures, if the userfaultfd is to be extended for
    456	 * VM_UFFD_WP tracking and we intend to arm the userfault
    457	 * without first stopping userland access to the memory. For
    458	 * VM_UFFD_MISSING userfaults this is enough for now.
    459	 */
    460	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
    461		/*
    462		 * Validate the invariant that nowait must allow retry
    463		 * to be sure not to return SIGBUS erroneously on
    464		 * nowait invocations.
    465		 */
    466		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
    467#ifdef CONFIG_DEBUG_VM
    468		if (printk_ratelimit()) {
    469			printk(KERN_WARNING
    470			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
    471			       vmf->flags);
    472			dump_stack();
    473		}
    474#endif
    475		goto out;
    476	}
    477
    478	/*
    479	 * Handle nowait, not much to do other than tell it to retry
    480	 * and wait.
    481	 */
    482	ret = VM_FAULT_RETRY;
    483	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
    484		goto out;
    485
    486	/* take the reference before dropping the mmap_lock */
    487	userfaultfd_ctx_get(ctx);
    488
    489	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
    490	uwq.wq.private = current;
    491	uwq.msg = userfault_msg(vmf->real_address, vmf->flags, reason,
    492			ctx->features);
    493	uwq.ctx = ctx;
    494	uwq.waken = false;
    495
    496	blocking_state = userfaultfd_get_blocking_state(vmf->flags);
    497
    498	spin_lock_irq(&ctx->fault_pending_wqh.lock);
    499	/*
    500	 * After the __add_wait_queue the uwq is visible to userland
    501	 * through poll/read().
    502	 */
    503	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
    504	/*
    505	 * The smp_mb() after __set_current_state prevents the reads
    506	 * following the spin_unlock to happen before the list_add in
    507	 * __add_wait_queue.
    508	 */
    509	set_current_state(blocking_state);
    510	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
    511
    512	if (!is_vm_hugetlb_page(vmf->vma))
    513		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
    514						  reason);
    515	else
    516		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
    517						       vmf->address,
    518						       vmf->flags, reason);
    519	mmap_read_unlock(mm);
    520
    521	if (likely(must_wait && !READ_ONCE(ctx->released))) {
    522		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
    523		schedule();
    524	}
    525
    526	__set_current_state(TASK_RUNNING);
    527
    528	/*
    529	 * Here we race with the list_del; list_add in
    530	 * userfaultfd_ctx_read(), however because we don't ever run
    531	 * list_del_init() to refile across the two lists, the prev
    532	 * and next pointers will never point to self. list_add also
    533	 * would never let any of the two pointers to point to
    534	 * self. So list_empty_careful won't risk to see both pointers
    535	 * pointing to self at any time during the list refile. The
    536	 * only case where list_del_init() is called is the full
    537	 * removal in the wake function and there we don't re-list_add
    538	 * and it's fine not to block on the spinlock. The uwq on this
    539	 * kernel stack can be released after the list_del_init.
    540	 */
    541	if (!list_empty_careful(&uwq.wq.entry)) {
    542		spin_lock_irq(&ctx->fault_pending_wqh.lock);
    543		/*
    544		 * No need of list_del_init(), the uwq on the stack
    545		 * will be freed shortly anyway.
    546		 */
    547		list_del(&uwq.wq.entry);
    548		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
    549	}
    550
    551	/*
    552	 * ctx may go away after this if the userfault pseudo fd is
    553	 * already released.
    554	 */
    555	userfaultfd_ctx_put(ctx);
    556
    557out:
    558	return ret;
    559}
    560
    561static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
    562					      struct userfaultfd_wait_queue *ewq)
    563{
    564	struct userfaultfd_ctx *release_new_ctx;
    565
    566	if (WARN_ON_ONCE(current->flags & PF_EXITING))
    567		goto out;
    568
    569	ewq->ctx = ctx;
    570	init_waitqueue_entry(&ewq->wq, current);
    571	release_new_ctx = NULL;
    572
    573	spin_lock_irq(&ctx->event_wqh.lock);
    574	/*
    575	 * After the __add_wait_queue the uwq is visible to userland
    576	 * through poll/read().
    577	 */
    578	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
    579	for (;;) {
    580		set_current_state(TASK_KILLABLE);
    581		if (ewq->msg.event == 0)
    582			break;
    583		if (READ_ONCE(ctx->released) ||
    584		    fatal_signal_pending(current)) {
    585			/*
    586			 * &ewq->wq may be queued in fork_event, but
    587			 * __remove_wait_queue ignores the head
    588			 * parameter. It would be a problem if it
    589			 * didn't.
    590			 */
    591			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
    592			if (ewq->msg.event == UFFD_EVENT_FORK) {
    593				struct userfaultfd_ctx *new;
    594
    595				new = (struct userfaultfd_ctx *)
    596					(unsigned long)
    597					ewq->msg.arg.reserved.reserved1;
    598				release_new_ctx = new;
    599			}
    600			break;
    601		}
    602
    603		spin_unlock_irq(&ctx->event_wqh.lock);
    604
    605		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
    606		schedule();
    607
    608		spin_lock_irq(&ctx->event_wqh.lock);
    609	}
    610	__set_current_state(TASK_RUNNING);
    611	spin_unlock_irq(&ctx->event_wqh.lock);
    612
    613	if (release_new_ctx) {
    614		struct vm_area_struct *vma;
    615		struct mm_struct *mm = release_new_ctx->mm;
    616
    617		/* the various vma->vm_userfaultfd_ctx still points to it */
    618		mmap_write_lock(mm);
    619		for (vma = mm->mmap; vma; vma = vma->vm_next)
    620			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
    621				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
    622				vma->vm_flags &= ~__VM_UFFD_FLAGS;
    623			}
    624		mmap_write_unlock(mm);
    625
    626		userfaultfd_ctx_put(release_new_ctx);
    627	}
    628
    629	/*
    630	 * ctx may go away after this if the userfault pseudo fd is
    631	 * already released.
    632	 */
    633out:
    634	atomic_dec(&ctx->mmap_changing);
    635	VM_BUG_ON(atomic_read(&ctx->mmap_changing) < 0);
    636	userfaultfd_ctx_put(ctx);
    637}
    638
    639static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
    640				       struct userfaultfd_wait_queue *ewq)
    641{
    642	ewq->msg.event = 0;
    643	wake_up_locked(&ctx->event_wqh);
    644	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
    645}
    646
    647int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
    648{
    649	struct userfaultfd_ctx *ctx = NULL, *octx;
    650	struct userfaultfd_fork_ctx *fctx;
    651
    652	octx = vma->vm_userfaultfd_ctx.ctx;
    653	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
    654		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
    655		vma->vm_flags &= ~__VM_UFFD_FLAGS;
    656		return 0;
    657	}
    658
    659	list_for_each_entry(fctx, fcs, list)
    660		if (fctx->orig == octx) {
    661			ctx = fctx->new;
    662			break;
    663		}
    664
    665	if (!ctx) {
    666		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
    667		if (!fctx)
    668			return -ENOMEM;
    669
    670		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
    671		if (!ctx) {
    672			kfree(fctx);
    673			return -ENOMEM;
    674		}
    675
    676		refcount_set(&ctx->refcount, 1);
    677		ctx->flags = octx->flags;
    678		ctx->features = octx->features;
    679		ctx->released = false;
    680		atomic_set(&ctx->mmap_changing, 0);
    681		ctx->mm = vma->vm_mm;
    682		mmgrab(ctx->mm);
    683
    684		userfaultfd_ctx_get(octx);
    685		atomic_inc(&octx->mmap_changing);
    686		fctx->orig = octx;
    687		fctx->new = ctx;
    688		list_add_tail(&fctx->list, fcs);
    689	}
    690
    691	vma->vm_userfaultfd_ctx.ctx = ctx;
    692	return 0;
    693}
    694
    695static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
    696{
    697	struct userfaultfd_ctx *ctx = fctx->orig;
    698	struct userfaultfd_wait_queue ewq;
    699
    700	msg_init(&ewq.msg);
    701
    702	ewq.msg.event = UFFD_EVENT_FORK;
    703	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
    704
    705	userfaultfd_event_wait_completion(ctx, &ewq);
    706}
    707
    708void dup_userfaultfd_complete(struct list_head *fcs)
    709{
    710	struct userfaultfd_fork_ctx *fctx, *n;
    711
    712	list_for_each_entry_safe(fctx, n, fcs, list) {
    713		dup_fctx(fctx);
    714		list_del(&fctx->list);
    715		kfree(fctx);
    716	}
    717}
    718
    719void mremap_userfaultfd_prep(struct vm_area_struct *vma,
    720			     struct vm_userfaultfd_ctx *vm_ctx)
    721{
    722	struct userfaultfd_ctx *ctx;
    723
    724	ctx = vma->vm_userfaultfd_ctx.ctx;
    725
    726	if (!ctx)
    727		return;
    728
    729	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
    730		vm_ctx->ctx = ctx;
    731		userfaultfd_ctx_get(ctx);
    732		atomic_inc(&ctx->mmap_changing);
    733	} else {
    734		/* Drop uffd context if remap feature not enabled */
    735		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
    736		vma->vm_flags &= ~__VM_UFFD_FLAGS;
    737	}
    738}
    739
    740void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
    741				 unsigned long from, unsigned long to,
    742				 unsigned long len)
    743{
    744	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
    745	struct userfaultfd_wait_queue ewq;
    746
    747	if (!ctx)
    748		return;
    749
    750	if (to & ~PAGE_MASK) {
    751		userfaultfd_ctx_put(ctx);
    752		return;
    753	}
    754
    755	msg_init(&ewq.msg);
    756
    757	ewq.msg.event = UFFD_EVENT_REMAP;
    758	ewq.msg.arg.remap.from = from;
    759	ewq.msg.arg.remap.to = to;
    760	ewq.msg.arg.remap.len = len;
    761
    762	userfaultfd_event_wait_completion(ctx, &ewq);
    763}
    764
    765bool userfaultfd_remove(struct vm_area_struct *vma,
    766			unsigned long start, unsigned long end)
    767{
    768	struct mm_struct *mm = vma->vm_mm;
    769	struct userfaultfd_ctx *ctx;
    770	struct userfaultfd_wait_queue ewq;
    771
    772	ctx = vma->vm_userfaultfd_ctx.ctx;
    773	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
    774		return true;
    775
    776	userfaultfd_ctx_get(ctx);
    777	atomic_inc(&ctx->mmap_changing);
    778	mmap_read_unlock(mm);
    779
    780	msg_init(&ewq.msg);
    781
    782	ewq.msg.event = UFFD_EVENT_REMOVE;
    783	ewq.msg.arg.remove.start = start;
    784	ewq.msg.arg.remove.end = end;
    785
    786	userfaultfd_event_wait_completion(ctx, &ewq);
    787
    788	return false;
    789}
    790
    791static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
    792			  unsigned long start, unsigned long end)
    793{
    794	struct userfaultfd_unmap_ctx *unmap_ctx;
    795
    796	list_for_each_entry(unmap_ctx, unmaps, list)
    797		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
    798		    unmap_ctx->end == end)
    799			return true;
    800
    801	return false;
    802}
    803
    804int userfaultfd_unmap_prep(struct vm_area_struct *vma,
    805			   unsigned long start, unsigned long end,
    806			   struct list_head *unmaps)
    807{
    808	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
    809		struct userfaultfd_unmap_ctx *unmap_ctx;
    810		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
    811
    812		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
    813		    has_unmap_ctx(ctx, unmaps, start, end))
    814			continue;
    815
    816		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
    817		if (!unmap_ctx)
    818			return -ENOMEM;
    819
    820		userfaultfd_ctx_get(ctx);
    821		atomic_inc(&ctx->mmap_changing);
    822		unmap_ctx->ctx = ctx;
    823		unmap_ctx->start = start;
    824		unmap_ctx->end = end;
    825		list_add_tail(&unmap_ctx->list, unmaps);
    826	}
    827
    828	return 0;
    829}
    830
    831void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
    832{
    833	struct userfaultfd_unmap_ctx *ctx, *n;
    834	struct userfaultfd_wait_queue ewq;
    835
    836	list_for_each_entry_safe(ctx, n, uf, list) {
    837		msg_init(&ewq.msg);
    838
    839		ewq.msg.event = UFFD_EVENT_UNMAP;
    840		ewq.msg.arg.remove.start = ctx->start;
    841		ewq.msg.arg.remove.end = ctx->end;
    842
    843		userfaultfd_event_wait_completion(ctx->ctx, &ewq);
    844
    845		list_del(&ctx->list);
    846		kfree(ctx);
    847	}
    848}
    849
    850static int userfaultfd_release(struct inode *inode, struct file *file)
    851{
    852	struct userfaultfd_ctx *ctx = file->private_data;
    853	struct mm_struct *mm = ctx->mm;
    854	struct vm_area_struct *vma, *prev;
    855	/* len == 0 means wake all */
    856	struct userfaultfd_wake_range range = { .len = 0, };
    857	unsigned long new_flags;
    858
    859	WRITE_ONCE(ctx->released, true);
    860
    861	if (!mmget_not_zero(mm))
    862		goto wakeup;
    863
    864	/*
    865	 * Flush page faults out of all CPUs. NOTE: all page faults
    866	 * must be retried without returning VM_FAULT_SIGBUS if
    867	 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
    868	 * changes while handle_userfault released the mmap_lock. So
    869	 * it's critical that released is set to true (above), before
    870	 * taking the mmap_lock for writing.
    871	 */
    872	mmap_write_lock(mm);
    873	prev = NULL;
    874	for (vma = mm->mmap; vma; vma = vma->vm_next) {
    875		cond_resched();
    876		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
    877		       !!(vma->vm_flags & __VM_UFFD_FLAGS));
    878		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
    879			prev = vma;
    880			continue;
    881		}
    882		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
    883		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
    884				 new_flags, vma->anon_vma,
    885				 vma->vm_file, vma->vm_pgoff,
    886				 vma_policy(vma),
    887				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
    888		if (prev)
    889			vma = prev;
    890		else
    891			prev = vma;
    892		vma->vm_flags = new_flags;
    893		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
    894	}
    895	mmap_write_unlock(mm);
    896	mmput(mm);
    897wakeup:
    898	/*
    899	 * After no new page faults can wait on this fault_*wqh, flush
    900	 * the last page faults that may have been already waiting on
    901	 * the fault_*wqh.
    902	 */
    903	spin_lock_irq(&ctx->fault_pending_wqh.lock);
    904	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
    905	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
    906	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
    907
    908	/* Flush pending events that may still wait on event_wqh */
    909	wake_up_all(&ctx->event_wqh);
    910
    911	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
    912	userfaultfd_ctx_put(ctx);
    913	return 0;
    914}
    915
    916/* fault_pending_wqh.lock must be hold by the caller */
    917static inline struct userfaultfd_wait_queue *find_userfault_in(
    918		wait_queue_head_t *wqh)
    919{
    920	wait_queue_entry_t *wq;
    921	struct userfaultfd_wait_queue *uwq;
    922
    923	lockdep_assert_held(&wqh->lock);
    924
    925	uwq = NULL;
    926	if (!waitqueue_active(wqh))
    927		goto out;
    928	/* walk in reverse to provide FIFO behavior to read userfaults */
    929	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
    930	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
    931out:
    932	return uwq;
    933}
    934
    935static inline struct userfaultfd_wait_queue *find_userfault(
    936		struct userfaultfd_ctx *ctx)
    937{
    938	return find_userfault_in(&ctx->fault_pending_wqh);
    939}
    940
    941static inline struct userfaultfd_wait_queue *find_userfault_evt(
    942		struct userfaultfd_ctx *ctx)
    943{
    944	return find_userfault_in(&ctx->event_wqh);
    945}
    946
    947static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
    948{
    949	struct userfaultfd_ctx *ctx = file->private_data;
    950	__poll_t ret;
    951
    952	poll_wait(file, &ctx->fd_wqh, wait);
    953
    954	if (!userfaultfd_is_initialized(ctx))
    955		return EPOLLERR;
    956
    957	/*
    958	 * poll() never guarantees that read won't block.
    959	 * userfaults can be waken before they're read().
    960	 */
    961	if (unlikely(!(file->f_flags & O_NONBLOCK)))
    962		return EPOLLERR;
    963	/*
    964	 * lockless access to see if there are pending faults
    965	 * __pollwait last action is the add_wait_queue but
    966	 * the spin_unlock would allow the waitqueue_active to
    967	 * pass above the actual list_add inside
    968	 * add_wait_queue critical section. So use a full
    969	 * memory barrier to serialize the list_add write of
    970	 * add_wait_queue() with the waitqueue_active read
    971	 * below.
    972	 */
    973	ret = 0;
    974	smp_mb();
    975	if (waitqueue_active(&ctx->fault_pending_wqh))
    976		ret = EPOLLIN;
    977	else if (waitqueue_active(&ctx->event_wqh))
    978		ret = EPOLLIN;
    979
    980	return ret;
    981}
    982
    983static const struct file_operations userfaultfd_fops;
    984
    985static int resolve_userfault_fork(struct userfaultfd_ctx *new,
    986				  struct inode *inode,
    987				  struct uffd_msg *msg)
    988{
    989	int fd;
    990
    991	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, new,
    992			O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS), inode);
    993	if (fd < 0)
    994		return fd;
    995
    996	msg->arg.reserved.reserved1 = 0;
    997	msg->arg.fork.ufd = fd;
    998	return 0;
    999}
   1000
   1001static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
   1002				    struct uffd_msg *msg, struct inode *inode)
   1003{
   1004	ssize_t ret;
   1005	DECLARE_WAITQUEUE(wait, current);
   1006	struct userfaultfd_wait_queue *uwq;
   1007	/*
   1008	 * Handling fork event requires sleeping operations, so
   1009	 * we drop the event_wqh lock, then do these ops, then
   1010	 * lock it back and wake up the waiter. While the lock is
   1011	 * dropped the ewq may go away so we keep track of it
   1012	 * carefully.
   1013	 */
   1014	LIST_HEAD(fork_event);
   1015	struct userfaultfd_ctx *fork_nctx = NULL;
   1016
   1017	/* always take the fd_wqh lock before the fault_pending_wqh lock */
   1018	spin_lock_irq(&ctx->fd_wqh.lock);
   1019	__add_wait_queue(&ctx->fd_wqh, &wait);
   1020	for (;;) {
   1021		set_current_state(TASK_INTERRUPTIBLE);
   1022		spin_lock(&ctx->fault_pending_wqh.lock);
   1023		uwq = find_userfault(ctx);
   1024		if (uwq) {
   1025			/*
   1026			 * Use a seqcount to repeat the lockless check
   1027			 * in wake_userfault() to avoid missing
   1028			 * wakeups because during the refile both
   1029			 * waitqueue could become empty if this is the
   1030			 * only userfault.
   1031			 */
   1032			write_seqcount_begin(&ctx->refile_seq);
   1033
   1034			/*
   1035			 * The fault_pending_wqh.lock prevents the uwq
   1036			 * to disappear from under us.
   1037			 *
   1038			 * Refile this userfault from
   1039			 * fault_pending_wqh to fault_wqh, it's not
   1040			 * pending anymore after we read it.
   1041			 *
   1042			 * Use list_del() by hand (as
   1043			 * userfaultfd_wake_function also uses
   1044			 * list_del_init() by hand) to be sure nobody
   1045			 * changes __remove_wait_queue() to use
   1046			 * list_del_init() in turn breaking the
   1047			 * !list_empty_careful() check in
   1048			 * handle_userfault(). The uwq->wq.head list
   1049			 * must never be empty at any time during the
   1050			 * refile, or the waitqueue could disappear
   1051			 * from under us. The "wait_queue_head_t"
   1052			 * parameter of __remove_wait_queue() is unused
   1053			 * anyway.
   1054			 */
   1055			list_del(&uwq->wq.entry);
   1056			add_wait_queue(&ctx->fault_wqh, &uwq->wq);
   1057
   1058			write_seqcount_end(&ctx->refile_seq);
   1059
   1060			/* careful to always initialize msg if ret == 0 */
   1061			*msg = uwq->msg;
   1062			spin_unlock(&ctx->fault_pending_wqh.lock);
   1063			ret = 0;
   1064			break;
   1065		}
   1066		spin_unlock(&ctx->fault_pending_wqh.lock);
   1067
   1068		spin_lock(&ctx->event_wqh.lock);
   1069		uwq = find_userfault_evt(ctx);
   1070		if (uwq) {
   1071			*msg = uwq->msg;
   1072
   1073			if (uwq->msg.event == UFFD_EVENT_FORK) {
   1074				fork_nctx = (struct userfaultfd_ctx *)
   1075					(unsigned long)
   1076					uwq->msg.arg.reserved.reserved1;
   1077				list_move(&uwq->wq.entry, &fork_event);
   1078				/*
   1079				 * fork_nctx can be freed as soon as
   1080				 * we drop the lock, unless we take a
   1081				 * reference on it.
   1082				 */
   1083				userfaultfd_ctx_get(fork_nctx);
   1084				spin_unlock(&ctx->event_wqh.lock);
   1085				ret = 0;
   1086				break;
   1087			}
   1088
   1089			userfaultfd_event_complete(ctx, uwq);
   1090			spin_unlock(&ctx->event_wqh.lock);
   1091			ret = 0;
   1092			break;
   1093		}
   1094		spin_unlock(&ctx->event_wqh.lock);
   1095
   1096		if (signal_pending(current)) {
   1097			ret = -ERESTARTSYS;
   1098			break;
   1099		}
   1100		if (no_wait) {
   1101			ret = -EAGAIN;
   1102			break;
   1103		}
   1104		spin_unlock_irq(&ctx->fd_wqh.lock);
   1105		schedule();
   1106		spin_lock_irq(&ctx->fd_wqh.lock);
   1107	}
   1108	__remove_wait_queue(&ctx->fd_wqh, &wait);
   1109	__set_current_state(TASK_RUNNING);
   1110	spin_unlock_irq(&ctx->fd_wqh.lock);
   1111
   1112	if (!ret && msg->event == UFFD_EVENT_FORK) {
   1113		ret = resolve_userfault_fork(fork_nctx, inode, msg);
   1114		spin_lock_irq(&ctx->event_wqh.lock);
   1115		if (!list_empty(&fork_event)) {
   1116			/*
   1117			 * The fork thread didn't abort, so we can
   1118			 * drop the temporary refcount.
   1119			 */
   1120			userfaultfd_ctx_put(fork_nctx);
   1121
   1122			uwq = list_first_entry(&fork_event,
   1123					       typeof(*uwq),
   1124					       wq.entry);
   1125			/*
   1126			 * If fork_event list wasn't empty and in turn
   1127			 * the event wasn't already released by fork
   1128			 * (the event is allocated on fork kernel
   1129			 * stack), put the event back to its place in
   1130			 * the event_wq. fork_event head will be freed
   1131			 * as soon as we return so the event cannot
   1132			 * stay queued there no matter the current
   1133			 * "ret" value.
   1134			 */
   1135			list_del(&uwq->wq.entry);
   1136			__add_wait_queue(&ctx->event_wqh, &uwq->wq);
   1137
   1138			/*
   1139			 * Leave the event in the waitqueue and report
   1140			 * error to userland if we failed to resolve
   1141			 * the userfault fork.
   1142			 */
   1143			if (likely(!ret))
   1144				userfaultfd_event_complete(ctx, uwq);
   1145		} else {
   1146			/*
   1147			 * Here the fork thread aborted and the
   1148			 * refcount from the fork thread on fork_nctx
   1149			 * has already been released. We still hold
   1150			 * the reference we took before releasing the
   1151			 * lock above. If resolve_userfault_fork
   1152			 * failed we've to drop it because the
   1153			 * fork_nctx has to be freed in such case. If
   1154			 * it succeeded we'll hold it because the new
   1155			 * uffd references it.
   1156			 */
   1157			if (ret)
   1158				userfaultfd_ctx_put(fork_nctx);
   1159		}
   1160		spin_unlock_irq(&ctx->event_wqh.lock);
   1161	}
   1162
   1163	return ret;
   1164}
   1165
   1166static ssize_t userfaultfd_read(struct file *file, char __user *buf,
   1167				size_t count, loff_t *ppos)
   1168{
   1169	struct userfaultfd_ctx *ctx = file->private_data;
   1170	ssize_t _ret, ret = 0;
   1171	struct uffd_msg msg;
   1172	int no_wait = file->f_flags & O_NONBLOCK;
   1173	struct inode *inode = file_inode(file);
   1174
   1175	if (!userfaultfd_is_initialized(ctx))
   1176		return -EINVAL;
   1177
   1178	for (;;) {
   1179		if (count < sizeof(msg))
   1180			return ret ? ret : -EINVAL;
   1181		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
   1182		if (_ret < 0)
   1183			return ret ? ret : _ret;
   1184		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
   1185			return ret ? ret : -EFAULT;
   1186		ret += sizeof(msg);
   1187		buf += sizeof(msg);
   1188		count -= sizeof(msg);
   1189		/*
   1190		 * Allow to read more than one fault at time but only
   1191		 * block if waiting for the very first one.
   1192		 */
   1193		no_wait = O_NONBLOCK;
   1194	}
   1195}
   1196
   1197static void __wake_userfault(struct userfaultfd_ctx *ctx,
   1198			     struct userfaultfd_wake_range *range)
   1199{
   1200	spin_lock_irq(&ctx->fault_pending_wqh.lock);
   1201	/* wake all in the range and autoremove */
   1202	if (waitqueue_active(&ctx->fault_pending_wqh))
   1203		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
   1204				     range);
   1205	if (waitqueue_active(&ctx->fault_wqh))
   1206		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
   1207	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
   1208}
   1209
   1210static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
   1211					   struct userfaultfd_wake_range *range)
   1212{
   1213	unsigned seq;
   1214	bool need_wakeup;
   1215
   1216	/*
   1217	 * To be sure waitqueue_active() is not reordered by the CPU
   1218	 * before the pagetable update, use an explicit SMP memory
   1219	 * barrier here. PT lock release or mmap_read_unlock(mm) still
   1220	 * have release semantics that can allow the
   1221	 * waitqueue_active() to be reordered before the pte update.
   1222	 */
   1223	smp_mb();
   1224
   1225	/*
   1226	 * Use waitqueue_active because it's very frequent to
   1227	 * change the address space atomically even if there are no
   1228	 * userfaults yet. So we take the spinlock only when we're
   1229	 * sure we've userfaults to wake.
   1230	 */
   1231	do {
   1232		seq = read_seqcount_begin(&ctx->refile_seq);
   1233		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
   1234			waitqueue_active(&ctx->fault_wqh);
   1235		cond_resched();
   1236	} while (read_seqcount_retry(&ctx->refile_seq, seq));
   1237	if (need_wakeup)
   1238		__wake_userfault(ctx, range);
   1239}
   1240
   1241static __always_inline int validate_range(struct mm_struct *mm,
   1242					  __u64 start, __u64 len)
   1243{
   1244	__u64 task_size = mm->task_size;
   1245
   1246	if (start & ~PAGE_MASK)
   1247		return -EINVAL;
   1248	if (len & ~PAGE_MASK)
   1249		return -EINVAL;
   1250	if (!len)
   1251		return -EINVAL;
   1252	if (start < mmap_min_addr)
   1253		return -EINVAL;
   1254	if (start >= task_size)
   1255		return -EINVAL;
   1256	if (len > task_size - start)
   1257		return -EINVAL;
   1258	return 0;
   1259}
   1260
   1261static int userfaultfd_register(struct userfaultfd_ctx *ctx,
   1262				unsigned long arg)
   1263{
   1264	struct mm_struct *mm = ctx->mm;
   1265	struct vm_area_struct *vma, *prev, *cur;
   1266	int ret;
   1267	struct uffdio_register uffdio_register;
   1268	struct uffdio_register __user *user_uffdio_register;
   1269	unsigned long vm_flags, new_flags;
   1270	bool found;
   1271	bool basic_ioctls;
   1272	unsigned long start, end, vma_end;
   1273
   1274	user_uffdio_register = (struct uffdio_register __user *) arg;
   1275
   1276	ret = -EFAULT;
   1277	if (copy_from_user(&uffdio_register, user_uffdio_register,
   1278			   sizeof(uffdio_register)-sizeof(__u64)))
   1279		goto out;
   1280
   1281	ret = -EINVAL;
   1282	if (!uffdio_register.mode)
   1283		goto out;
   1284	if (uffdio_register.mode & ~UFFD_API_REGISTER_MODES)
   1285		goto out;
   1286	vm_flags = 0;
   1287	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
   1288		vm_flags |= VM_UFFD_MISSING;
   1289	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
   1290#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
   1291		goto out;
   1292#endif
   1293		vm_flags |= VM_UFFD_WP;
   1294	}
   1295	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR) {
   1296#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
   1297		goto out;
   1298#endif
   1299		vm_flags |= VM_UFFD_MINOR;
   1300	}
   1301
   1302	ret = validate_range(mm, uffdio_register.range.start,
   1303			     uffdio_register.range.len);
   1304	if (ret)
   1305		goto out;
   1306
   1307	start = uffdio_register.range.start;
   1308	end = start + uffdio_register.range.len;
   1309
   1310	ret = -ENOMEM;
   1311	if (!mmget_not_zero(mm))
   1312		goto out;
   1313
   1314	mmap_write_lock(mm);
   1315	vma = find_vma_prev(mm, start, &prev);
   1316	if (!vma)
   1317		goto out_unlock;
   1318
   1319	/* check that there's at least one vma in the range */
   1320	ret = -EINVAL;
   1321	if (vma->vm_start >= end)
   1322		goto out_unlock;
   1323
   1324	/*
   1325	 * If the first vma contains huge pages, make sure start address
   1326	 * is aligned to huge page size.
   1327	 */
   1328	if (is_vm_hugetlb_page(vma)) {
   1329		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
   1330
   1331		if (start & (vma_hpagesize - 1))
   1332			goto out_unlock;
   1333	}
   1334
   1335	/*
   1336	 * Search for not compatible vmas.
   1337	 */
   1338	found = false;
   1339	basic_ioctls = false;
   1340	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
   1341		cond_resched();
   1342
   1343		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
   1344		       !!(cur->vm_flags & __VM_UFFD_FLAGS));
   1345
   1346		/* check not compatible vmas */
   1347		ret = -EINVAL;
   1348		if (!vma_can_userfault(cur, vm_flags))
   1349			goto out_unlock;
   1350
   1351		/*
   1352		 * UFFDIO_COPY will fill file holes even without
   1353		 * PROT_WRITE. This check enforces that if this is a
   1354		 * MAP_SHARED, the process has write permission to the backing
   1355		 * file. If VM_MAYWRITE is set it also enforces that on a
   1356		 * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
   1357		 * F_WRITE_SEAL can be taken until the vma is destroyed.
   1358		 */
   1359		ret = -EPERM;
   1360		if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
   1361			goto out_unlock;
   1362
   1363		/*
   1364		 * If this vma contains ending address, and huge pages
   1365		 * check alignment.
   1366		 */
   1367		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
   1368		    end > cur->vm_start) {
   1369			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
   1370
   1371			ret = -EINVAL;
   1372
   1373			if (end & (vma_hpagesize - 1))
   1374				goto out_unlock;
   1375		}
   1376		if ((vm_flags & VM_UFFD_WP) && !(cur->vm_flags & VM_MAYWRITE))
   1377			goto out_unlock;
   1378
   1379		/*
   1380		 * Check that this vma isn't already owned by a
   1381		 * different userfaultfd. We can't allow more than one
   1382		 * userfaultfd to own a single vma simultaneously or we
   1383		 * wouldn't know which one to deliver the userfaults to.
   1384		 */
   1385		ret = -EBUSY;
   1386		if (cur->vm_userfaultfd_ctx.ctx &&
   1387		    cur->vm_userfaultfd_ctx.ctx != ctx)
   1388			goto out_unlock;
   1389
   1390		/*
   1391		 * Note vmas containing huge pages
   1392		 */
   1393		if (is_vm_hugetlb_page(cur))
   1394			basic_ioctls = true;
   1395
   1396		found = true;
   1397	}
   1398	BUG_ON(!found);
   1399
   1400	if (vma->vm_start < start)
   1401		prev = vma;
   1402
   1403	ret = 0;
   1404	do {
   1405		cond_resched();
   1406
   1407		BUG_ON(!vma_can_userfault(vma, vm_flags));
   1408		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
   1409		       vma->vm_userfaultfd_ctx.ctx != ctx);
   1410		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
   1411
   1412		/*
   1413		 * Nothing to do: this vma is already registered into this
   1414		 * userfaultfd and with the right tracking mode too.
   1415		 */
   1416		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
   1417		    (vma->vm_flags & vm_flags) == vm_flags)
   1418			goto skip;
   1419
   1420		if (vma->vm_start > start)
   1421			start = vma->vm_start;
   1422		vma_end = min(end, vma->vm_end);
   1423
   1424		new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
   1425		prev = vma_merge(mm, prev, start, vma_end, new_flags,
   1426				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
   1427				 vma_policy(vma),
   1428				 ((struct vm_userfaultfd_ctx){ ctx }),
   1429				 anon_vma_name(vma));
   1430		if (prev) {
   1431			vma = prev;
   1432			goto next;
   1433		}
   1434		if (vma->vm_start < start) {
   1435			ret = split_vma(mm, vma, start, 1);
   1436			if (ret)
   1437				break;
   1438		}
   1439		if (vma->vm_end > end) {
   1440			ret = split_vma(mm, vma, end, 0);
   1441			if (ret)
   1442				break;
   1443		}
   1444	next:
   1445		/*
   1446		 * In the vma_merge() successful mprotect-like case 8:
   1447		 * the next vma was merged into the current one and
   1448		 * the current one has not been updated yet.
   1449		 */
   1450		vma->vm_flags = new_flags;
   1451		vma->vm_userfaultfd_ctx.ctx = ctx;
   1452
   1453		if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
   1454			hugetlb_unshare_all_pmds(vma);
   1455
   1456	skip:
   1457		prev = vma;
   1458		start = vma->vm_end;
   1459		vma = vma->vm_next;
   1460	} while (vma && vma->vm_start < end);
   1461out_unlock:
   1462	mmap_write_unlock(mm);
   1463	mmput(mm);
   1464	if (!ret) {
   1465		__u64 ioctls_out;
   1466
   1467		ioctls_out = basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
   1468		    UFFD_API_RANGE_IOCTLS;
   1469
   1470		/*
   1471		 * Declare the WP ioctl only if the WP mode is
   1472		 * specified and all checks passed with the range
   1473		 */
   1474		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_WP))
   1475			ioctls_out &= ~((__u64)1 << _UFFDIO_WRITEPROTECT);
   1476
   1477		/* CONTINUE ioctl is only supported for MINOR ranges. */
   1478		if (!(uffdio_register.mode & UFFDIO_REGISTER_MODE_MINOR))
   1479			ioctls_out &= ~((__u64)1 << _UFFDIO_CONTINUE);
   1480
   1481		/*
   1482		 * Now that we scanned all vmas we can already tell
   1483		 * userland which ioctls methods are guaranteed to
   1484		 * succeed on this range.
   1485		 */
   1486		if (put_user(ioctls_out, &user_uffdio_register->ioctls))
   1487			ret = -EFAULT;
   1488	}
   1489out:
   1490	return ret;
   1491}
   1492
   1493static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
   1494				  unsigned long arg)
   1495{
   1496	struct mm_struct *mm = ctx->mm;
   1497	struct vm_area_struct *vma, *prev, *cur;
   1498	int ret;
   1499	struct uffdio_range uffdio_unregister;
   1500	unsigned long new_flags;
   1501	bool found;
   1502	unsigned long start, end, vma_end;
   1503	const void __user *buf = (void __user *)arg;
   1504
   1505	ret = -EFAULT;
   1506	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
   1507		goto out;
   1508
   1509	ret = validate_range(mm, uffdio_unregister.start,
   1510			     uffdio_unregister.len);
   1511	if (ret)
   1512		goto out;
   1513
   1514	start = uffdio_unregister.start;
   1515	end = start + uffdio_unregister.len;
   1516
   1517	ret = -ENOMEM;
   1518	if (!mmget_not_zero(mm))
   1519		goto out;
   1520
   1521	mmap_write_lock(mm);
   1522	vma = find_vma_prev(mm, start, &prev);
   1523	if (!vma)
   1524		goto out_unlock;
   1525
   1526	/* check that there's at least one vma in the range */
   1527	ret = -EINVAL;
   1528	if (vma->vm_start >= end)
   1529		goto out_unlock;
   1530
   1531	/*
   1532	 * If the first vma contains huge pages, make sure start address
   1533	 * is aligned to huge page size.
   1534	 */
   1535	if (is_vm_hugetlb_page(vma)) {
   1536		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
   1537
   1538		if (start & (vma_hpagesize - 1))
   1539			goto out_unlock;
   1540	}
   1541
   1542	/*
   1543	 * Search for not compatible vmas.
   1544	 */
   1545	found = false;
   1546	ret = -EINVAL;
   1547	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
   1548		cond_resched();
   1549
   1550		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
   1551		       !!(cur->vm_flags & __VM_UFFD_FLAGS));
   1552
   1553		/*
   1554		 * Check not compatible vmas, not strictly required
   1555		 * here as not compatible vmas cannot have an
   1556		 * userfaultfd_ctx registered on them, but this
   1557		 * provides for more strict behavior to notice
   1558		 * unregistration errors.
   1559		 */
   1560		if (!vma_can_userfault(cur, cur->vm_flags))
   1561			goto out_unlock;
   1562
   1563		found = true;
   1564	}
   1565	BUG_ON(!found);
   1566
   1567	if (vma->vm_start < start)
   1568		prev = vma;
   1569
   1570	ret = 0;
   1571	do {
   1572		cond_resched();
   1573
   1574		BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
   1575
   1576		/*
   1577		 * Nothing to do: this vma is already registered into this
   1578		 * userfaultfd and with the right tracking mode too.
   1579		 */
   1580		if (!vma->vm_userfaultfd_ctx.ctx)
   1581			goto skip;
   1582
   1583		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
   1584
   1585		if (vma->vm_start > start)
   1586			start = vma->vm_start;
   1587		vma_end = min(end, vma->vm_end);
   1588
   1589		if (userfaultfd_missing(vma)) {
   1590			/*
   1591			 * Wake any concurrent pending userfault while
   1592			 * we unregister, so they will not hang
   1593			 * permanently and it avoids userland to call
   1594			 * UFFDIO_WAKE explicitly.
   1595			 */
   1596			struct userfaultfd_wake_range range;
   1597			range.start = start;
   1598			range.len = vma_end - start;
   1599			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
   1600		}
   1601
   1602		new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
   1603		prev = vma_merge(mm, prev, start, vma_end, new_flags,
   1604				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
   1605				 vma_policy(vma),
   1606				 NULL_VM_UFFD_CTX, anon_vma_name(vma));
   1607		if (prev) {
   1608			vma = prev;
   1609			goto next;
   1610		}
   1611		if (vma->vm_start < start) {
   1612			ret = split_vma(mm, vma, start, 1);
   1613			if (ret)
   1614				break;
   1615		}
   1616		if (vma->vm_end > end) {
   1617			ret = split_vma(mm, vma, end, 0);
   1618			if (ret)
   1619				break;
   1620		}
   1621	next:
   1622		/*
   1623		 * In the vma_merge() successful mprotect-like case 8:
   1624		 * the next vma was merged into the current one and
   1625		 * the current one has not been updated yet.
   1626		 */
   1627		vma->vm_flags = new_flags;
   1628		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
   1629
   1630	skip:
   1631		prev = vma;
   1632		start = vma->vm_end;
   1633		vma = vma->vm_next;
   1634	} while (vma && vma->vm_start < end);
   1635out_unlock:
   1636	mmap_write_unlock(mm);
   1637	mmput(mm);
   1638out:
   1639	return ret;
   1640}
   1641
   1642/*
   1643 * userfaultfd_wake may be used in combination with the
   1644 * UFFDIO_*_MODE_DONTWAKE to wakeup userfaults in batches.
   1645 */
   1646static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
   1647			    unsigned long arg)
   1648{
   1649	int ret;
   1650	struct uffdio_range uffdio_wake;
   1651	struct userfaultfd_wake_range range;
   1652	const void __user *buf = (void __user *)arg;
   1653
   1654	ret = -EFAULT;
   1655	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
   1656		goto out;
   1657
   1658	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
   1659	if (ret)
   1660		goto out;
   1661
   1662	range.start = uffdio_wake.start;
   1663	range.len = uffdio_wake.len;
   1664
   1665	/*
   1666	 * len == 0 means wake all and we don't want to wake all here,
   1667	 * so check it again to be sure.
   1668	 */
   1669	VM_BUG_ON(!range.len);
   1670
   1671	wake_userfault(ctx, &range);
   1672	ret = 0;
   1673
   1674out:
   1675	return ret;
   1676}
   1677
   1678static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
   1679			    unsigned long arg)
   1680{
   1681	__s64 ret;
   1682	struct uffdio_copy uffdio_copy;
   1683	struct uffdio_copy __user *user_uffdio_copy;
   1684	struct userfaultfd_wake_range range;
   1685
   1686	user_uffdio_copy = (struct uffdio_copy __user *) arg;
   1687
   1688	ret = -EAGAIN;
   1689	if (atomic_read(&ctx->mmap_changing))
   1690		goto out;
   1691
   1692	ret = -EFAULT;
   1693	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
   1694			   /* don't copy "copy" last field */
   1695			   sizeof(uffdio_copy)-sizeof(__s64)))
   1696		goto out;
   1697
   1698	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
   1699	if (ret)
   1700		goto out;
   1701	/*
   1702	 * double check for wraparound just in case. copy_from_user()
   1703	 * will later check uffdio_copy.src + uffdio_copy.len to fit
   1704	 * in the userland range.
   1705	 */
   1706	ret = -EINVAL;
   1707	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
   1708		goto out;
   1709	if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
   1710		goto out;
   1711	if (mmget_not_zero(ctx->mm)) {
   1712		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
   1713				   uffdio_copy.len, &ctx->mmap_changing,
   1714				   uffdio_copy.mode);
   1715		mmput(ctx->mm);
   1716	} else {
   1717		return -ESRCH;
   1718	}
   1719	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
   1720		return -EFAULT;
   1721	if (ret < 0)
   1722		goto out;
   1723	BUG_ON(!ret);
   1724	/* len == 0 would wake all */
   1725	range.len = ret;
   1726	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
   1727		range.start = uffdio_copy.dst;
   1728		wake_userfault(ctx, &range);
   1729	}
   1730	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
   1731out:
   1732	return ret;
   1733}
   1734
   1735static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
   1736				unsigned long arg)
   1737{
   1738	__s64 ret;
   1739	struct uffdio_zeropage uffdio_zeropage;
   1740	struct uffdio_zeropage __user *user_uffdio_zeropage;
   1741	struct userfaultfd_wake_range range;
   1742
   1743	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
   1744
   1745	ret = -EAGAIN;
   1746	if (atomic_read(&ctx->mmap_changing))
   1747		goto out;
   1748
   1749	ret = -EFAULT;
   1750	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
   1751			   /* don't copy "zeropage" last field */
   1752			   sizeof(uffdio_zeropage)-sizeof(__s64)))
   1753		goto out;
   1754
   1755	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
   1756			     uffdio_zeropage.range.len);
   1757	if (ret)
   1758		goto out;
   1759	ret = -EINVAL;
   1760	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
   1761		goto out;
   1762
   1763	if (mmget_not_zero(ctx->mm)) {
   1764		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
   1765				     uffdio_zeropage.range.len,
   1766				     &ctx->mmap_changing);
   1767		mmput(ctx->mm);
   1768	} else {
   1769		return -ESRCH;
   1770	}
   1771	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
   1772		return -EFAULT;
   1773	if (ret < 0)
   1774		goto out;
   1775	/* len == 0 would wake all */
   1776	BUG_ON(!ret);
   1777	range.len = ret;
   1778	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
   1779		range.start = uffdio_zeropage.range.start;
   1780		wake_userfault(ctx, &range);
   1781	}
   1782	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
   1783out:
   1784	return ret;
   1785}
   1786
   1787static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx,
   1788				    unsigned long arg)
   1789{
   1790	int ret;
   1791	struct uffdio_writeprotect uffdio_wp;
   1792	struct uffdio_writeprotect __user *user_uffdio_wp;
   1793	struct userfaultfd_wake_range range;
   1794	bool mode_wp, mode_dontwake;
   1795
   1796	if (atomic_read(&ctx->mmap_changing))
   1797		return -EAGAIN;
   1798
   1799	user_uffdio_wp = (struct uffdio_writeprotect __user *) arg;
   1800
   1801	if (copy_from_user(&uffdio_wp, user_uffdio_wp,
   1802			   sizeof(struct uffdio_writeprotect)))
   1803		return -EFAULT;
   1804
   1805	ret = validate_range(ctx->mm, uffdio_wp.range.start,
   1806			     uffdio_wp.range.len);
   1807	if (ret)
   1808		return ret;
   1809
   1810	if (uffdio_wp.mode & ~(UFFDIO_WRITEPROTECT_MODE_DONTWAKE |
   1811			       UFFDIO_WRITEPROTECT_MODE_WP))
   1812		return -EINVAL;
   1813
   1814	mode_wp = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_WP;
   1815	mode_dontwake = uffdio_wp.mode & UFFDIO_WRITEPROTECT_MODE_DONTWAKE;
   1816
   1817	if (mode_wp && mode_dontwake)
   1818		return -EINVAL;
   1819
   1820	if (mmget_not_zero(ctx->mm)) {
   1821		ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start,
   1822					  uffdio_wp.range.len, mode_wp,
   1823					  &ctx->mmap_changing);
   1824		mmput(ctx->mm);
   1825	} else {
   1826		return -ESRCH;
   1827	}
   1828
   1829	if (ret)
   1830		return ret;
   1831
   1832	if (!mode_wp && !mode_dontwake) {
   1833		range.start = uffdio_wp.range.start;
   1834		range.len = uffdio_wp.range.len;
   1835		wake_userfault(ctx, &range);
   1836	}
   1837	return ret;
   1838}
   1839
   1840static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg)
   1841{
   1842	__s64 ret;
   1843	struct uffdio_continue uffdio_continue;
   1844	struct uffdio_continue __user *user_uffdio_continue;
   1845	struct userfaultfd_wake_range range;
   1846
   1847	user_uffdio_continue = (struct uffdio_continue __user *)arg;
   1848
   1849	ret = -EAGAIN;
   1850	if (atomic_read(&ctx->mmap_changing))
   1851		goto out;
   1852
   1853	ret = -EFAULT;
   1854	if (copy_from_user(&uffdio_continue, user_uffdio_continue,
   1855			   /* don't copy the output fields */
   1856			   sizeof(uffdio_continue) - (sizeof(__s64))))
   1857		goto out;
   1858
   1859	ret = validate_range(ctx->mm, uffdio_continue.range.start,
   1860			     uffdio_continue.range.len);
   1861	if (ret)
   1862		goto out;
   1863
   1864	ret = -EINVAL;
   1865	/* double check for wraparound just in case. */
   1866	if (uffdio_continue.range.start + uffdio_continue.range.len <=
   1867	    uffdio_continue.range.start) {
   1868		goto out;
   1869	}
   1870	if (uffdio_continue.mode & ~UFFDIO_CONTINUE_MODE_DONTWAKE)
   1871		goto out;
   1872
   1873	if (mmget_not_zero(ctx->mm)) {
   1874		ret = mcopy_continue(ctx->mm, uffdio_continue.range.start,
   1875				     uffdio_continue.range.len,
   1876				     &ctx->mmap_changing);
   1877		mmput(ctx->mm);
   1878	} else {
   1879		return -ESRCH;
   1880	}
   1881
   1882	if (unlikely(put_user(ret, &user_uffdio_continue->mapped)))
   1883		return -EFAULT;
   1884	if (ret < 0)
   1885		goto out;
   1886
   1887	/* len == 0 would wake all */
   1888	BUG_ON(!ret);
   1889	range.len = ret;
   1890	if (!(uffdio_continue.mode & UFFDIO_CONTINUE_MODE_DONTWAKE)) {
   1891		range.start = uffdio_continue.range.start;
   1892		wake_userfault(ctx, &range);
   1893	}
   1894	ret = range.len == uffdio_continue.range.len ? 0 : -EAGAIN;
   1895
   1896out:
   1897	return ret;
   1898}
   1899
   1900static inline unsigned int uffd_ctx_features(__u64 user_features)
   1901{
   1902	/*
   1903	 * For the current set of features the bits just coincide. Set
   1904	 * UFFD_FEATURE_INITIALIZED to mark the features as enabled.
   1905	 */
   1906	return (unsigned int)user_features | UFFD_FEATURE_INITIALIZED;
   1907}
   1908
   1909/*
   1910 * userland asks for a certain API version and we return which bits
   1911 * and ioctl commands are implemented in this kernel for such API
   1912 * version or -EINVAL if unknown.
   1913 */
   1914static int userfaultfd_api(struct userfaultfd_ctx *ctx,
   1915			   unsigned long arg)
   1916{
   1917	struct uffdio_api uffdio_api;
   1918	void __user *buf = (void __user *)arg;
   1919	unsigned int ctx_features;
   1920	int ret;
   1921	__u64 features;
   1922
   1923	ret = -EFAULT;
   1924	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
   1925		goto out;
   1926	features = uffdio_api.features;
   1927	ret = -EINVAL;
   1928	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
   1929		goto err_out;
   1930	ret = -EPERM;
   1931	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
   1932		goto err_out;
   1933	/* report all available features and ioctls to userland */
   1934	uffdio_api.features = UFFD_API_FEATURES;
   1935#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
   1936	uffdio_api.features &=
   1937		~(UFFD_FEATURE_MINOR_HUGETLBFS | UFFD_FEATURE_MINOR_SHMEM);
   1938#endif
   1939#ifndef CONFIG_HAVE_ARCH_USERFAULTFD_WP
   1940	uffdio_api.features &= ~UFFD_FEATURE_PAGEFAULT_FLAG_WP;
   1941#endif
   1942#ifndef CONFIG_PTE_MARKER_UFFD_WP
   1943	uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM;
   1944#endif
   1945	uffdio_api.ioctls = UFFD_API_IOCTLS;
   1946	ret = -EFAULT;
   1947	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
   1948		goto out;
   1949
   1950	/* only enable the requested features for this uffd context */
   1951	ctx_features = uffd_ctx_features(features);
   1952	ret = -EINVAL;
   1953	if (cmpxchg(&ctx->features, 0, ctx_features) != 0)
   1954		goto err_out;
   1955
   1956	ret = 0;
   1957out:
   1958	return ret;
   1959err_out:
   1960	memset(&uffdio_api, 0, sizeof(uffdio_api));
   1961	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
   1962		ret = -EFAULT;
   1963	goto out;
   1964}
   1965
   1966static long userfaultfd_ioctl(struct file *file, unsigned cmd,
   1967			      unsigned long arg)
   1968{
   1969	int ret = -EINVAL;
   1970	struct userfaultfd_ctx *ctx = file->private_data;
   1971
   1972	if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx))
   1973		return -EINVAL;
   1974
   1975	switch(cmd) {
   1976	case UFFDIO_API:
   1977		ret = userfaultfd_api(ctx, arg);
   1978		break;
   1979	case UFFDIO_REGISTER:
   1980		ret = userfaultfd_register(ctx, arg);
   1981		break;
   1982	case UFFDIO_UNREGISTER:
   1983		ret = userfaultfd_unregister(ctx, arg);
   1984		break;
   1985	case UFFDIO_WAKE:
   1986		ret = userfaultfd_wake(ctx, arg);
   1987		break;
   1988	case UFFDIO_COPY:
   1989		ret = userfaultfd_copy(ctx, arg);
   1990		break;
   1991	case UFFDIO_ZEROPAGE:
   1992		ret = userfaultfd_zeropage(ctx, arg);
   1993		break;
   1994	case UFFDIO_WRITEPROTECT:
   1995		ret = userfaultfd_writeprotect(ctx, arg);
   1996		break;
   1997	case UFFDIO_CONTINUE:
   1998		ret = userfaultfd_continue(ctx, arg);
   1999		break;
   2000	}
   2001	return ret;
   2002}
   2003
   2004#ifdef CONFIG_PROC_FS
   2005static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
   2006{
   2007	struct userfaultfd_ctx *ctx = f->private_data;
   2008	wait_queue_entry_t *wq;
   2009	unsigned long pending = 0, total = 0;
   2010
   2011	spin_lock_irq(&ctx->fault_pending_wqh.lock);
   2012	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
   2013		pending++;
   2014		total++;
   2015	}
   2016	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
   2017		total++;
   2018	}
   2019	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
   2020
   2021	/*
   2022	 * If more protocols will be added, there will be all shown
   2023	 * separated by a space. Like this:
   2024	 *	protocols: aa:... bb:...
   2025	 */
   2026	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
   2027		   pending, total, UFFD_API, ctx->features,
   2028		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
   2029}
   2030#endif
   2031
   2032static const struct file_operations userfaultfd_fops = {
   2033#ifdef CONFIG_PROC_FS
   2034	.show_fdinfo	= userfaultfd_show_fdinfo,
   2035#endif
   2036	.release	= userfaultfd_release,
   2037	.poll		= userfaultfd_poll,
   2038	.read		= userfaultfd_read,
   2039	.unlocked_ioctl = userfaultfd_ioctl,
   2040	.compat_ioctl	= compat_ptr_ioctl,
   2041	.llseek		= noop_llseek,
   2042};
   2043
   2044static void init_once_userfaultfd_ctx(void *mem)
   2045{
   2046	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
   2047
   2048	init_waitqueue_head(&ctx->fault_pending_wqh);
   2049	init_waitqueue_head(&ctx->fault_wqh);
   2050	init_waitqueue_head(&ctx->event_wqh);
   2051	init_waitqueue_head(&ctx->fd_wqh);
   2052	seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
   2053}
   2054
   2055SYSCALL_DEFINE1(userfaultfd, int, flags)
   2056{
   2057	struct userfaultfd_ctx *ctx;
   2058	int fd;
   2059
   2060	if (!sysctl_unprivileged_userfaultfd &&
   2061	    (flags & UFFD_USER_MODE_ONLY) == 0 &&
   2062	    !capable(CAP_SYS_PTRACE)) {
   2063		printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
   2064			"sysctl knob to 1 if kernel faults must be handled "
   2065			"without obtaining CAP_SYS_PTRACE capability\n");
   2066		return -EPERM;
   2067	}
   2068
   2069	BUG_ON(!current->mm);
   2070
   2071	/* Check the UFFD_* constants for consistency.  */
   2072	BUILD_BUG_ON(UFFD_USER_MODE_ONLY & UFFD_SHARED_FCNTL_FLAGS);
   2073	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
   2074	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
   2075
   2076	if (flags & ~(UFFD_SHARED_FCNTL_FLAGS | UFFD_USER_MODE_ONLY))
   2077		return -EINVAL;
   2078
   2079	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
   2080	if (!ctx)
   2081		return -ENOMEM;
   2082
   2083	refcount_set(&ctx->refcount, 1);
   2084	ctx->flags = flags;
   2085	ctx->features = 0;
   2086	ctx->released = false;
   2087	atomic_set(&ctx->mmap_changing, 0);
   2088	ctx->mm = current->mm;
   2089	/* prevent the mm struct to be freed */
   2090	mmgrab(ctx->mm);
   2091
   2092	fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
   2093			O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
   2094	if (fd < 0) {
   2095		mmdrop(ctx->mm);
   2096		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
   2097	}
   2098	return fd;
   2099}
   2100
   2101static int __init userfaultfd_init(void)
   2102{
   2103	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
   2104						sizeof(struct userfaultfd_ctx),
   2105						0,
   2106						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
   2107						init_once_userfaultfd_ctx);
   2108	return 0;
   2109}
   2110__initcall(userfaultfd_init);