cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uaccess_with_memcpy.c (6327B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	p4d_t *p4d;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

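	/*
	 * Walk the page tables by hand, descending through all five
	 * levels (pgd, p4d, pud, pmd, pte) of current->mm until we
	 * reach the leaf entry mapping addr.
	 */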
	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	p4d = p4d_offset(pgd, addr);
	if (unlikely(p4d_none(*p4d) || p4d_bad(*p4d)))
		return 0;

	pud = pud_offset(p4d, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

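	/*
	 * Map and lock the pte, then check that the page is present,
	 * young, writable and dirty, so that touching it later cannot
	 * fault or require accessed/dirty bit updates.
	 */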
	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(to, &pte, &ptl)) {
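			/*
			 * The page was not pinnable: drop the mmap lock
			 * and poke it with a regular __put_user() so the
			 * fault handler brings it in, leaving the pte
			 * present, young and dirty for the next attempt.
			 */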
			if (!atomic)
				mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				mmap_read_lock(current->mm);
		}

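		/*
		 * Copy no further than the end of the current page:
		 * (~to & ~PAGE_MASK) + 1 is the number of bytes left
		 * between 'to' and the next page boundary.
		 */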
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

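		/*
		 * Temporarily re-enable kernel access to user memory
		 * (software PAN via the ARM domain register, where
		 * enabled) around the raw memcpy().
		 */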
		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
					  from, n);
	}
	return n;
}

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	mmap_read_lock(current->mm);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
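			/* fault the page in, as in __copy_to_user_memcpy() */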
			mmap_read_unlock(current->mm);
			if (__put_user(0, (char __user *)addr))
				goto out;
			mmap_read_lock(current->mm);
		}

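		/* clear at most up to the end of the current page */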
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	mmap_read_unlock(current->mm);

out:
	return n;
}

unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  Some (small but non-zero) overhead
 * would be implied by a runtime-determined variable threshold, and so
 * far measurements on the targets concerned didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for results to make some sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
    226{
    227	struct page *src_page, *dst_page;
    228	void *user_ptr, *kernel_ptr;
    229	unsigned long long t0, t1, t2;
    230	int size, ret;
    231
    232	ret = -ENOMEM;
    233	src_page = alloc_page(GFP_KERNEL);
    234	if (!src_page)
    235		goto no_src;
    236	dst_page = alloc_page(GFP_KERNEL);
    237	if (!dst_page)
    238		goto no_dst;
    239	kernel_ptr = page_address(src_page);
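	/*
	 * Map dst_page into vmalloc space with a writable protection
	 * so it can stand in for a userspace destination buffer.
	 */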
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif