cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mincore.c (7269B)


// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under a user process are always in RAM and never
	 * swapped out, but theoretically they still need to be checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	page = find_get_incore_page(mapping, index);
	if (page) {
		present = PageUptodate(page);
		put_page(page);
	}

	return present;
}

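/*
 * Fill the vector for a range that has no page-table entries: for a
 * file-backed VMA consult the page cache of the backing file, otherwise
 * report every page as not resident.  Returns the number of pages handled.
 */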
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

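/*
 * Fill the vector for one pmd's worth of addresses: a mapped transparent
 * huge page makes the whole range resident; otherwise each pte is checked
 * individually, falling back to the page cache (or the swap cache for swap
 * entries) when no page is mapped at that address.
 */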
static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if it tried)
	 * open for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&init_user_ns,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
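
For reference, the userspace entry point for the code above is the mincore(2)
wrapper declared in <sys/mman.h>. The sketch below is not part of mincore.c;
it only illustrates a typical call, and the file name "data.bin" is a
placeholder. It maps a file read-only and counts how many of its pages are
currently resident:

/* Minimal userspace sketch: count the resident pages of a file-backed
 * mapping via mincore(2).  "data.bin" is a placeholder path. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	int fd = open("data.bin", O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
		return 1;

	void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	long page = sysconf(_SC_PAGESIZE);
	size_t pages = (st.st_size + page - 1) / page;
	unsigned char *vec = malloc(pages);

	/* mincore() fills one status byte per page; bit 0 set means the
	 * page is resident at the time of the call. */
	if (vec && mincore(map, st.st_size, vec) == 0) {
		size_t resident = 0, i;

		for (i = 0; i < pages; i++)
			resident += vec[i] & 1;
		printf("%zu of %zu pages resident\n", resident, pages);
	}

	free(vec);
	munmap(map, st.st_size);
	close(fd);
	return 0;
}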