cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uaccess_64.h (2440B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
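	/*
	 * Note on the asm contract (informational comment, not from the
	 * original tree): all three routines take dst/src/len in
	 * rdi/rsi/rdx per the usual calling convention and return the
	 * number of bytes left uncopied in rax, hence the "=a" (ret)
	 * output and the "=D"/"=S"/"=d" operands, which are outputs as
	 * well because the callee may clobber them.  The remaining
	 * caller-saved registers appear in the clobber list.
	 */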
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

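/*
 * Informational note (not from the original tree): the raw_copy_*
 * helpers below are the arch hooks behind the generic
 * copy_{from,to}_user(), which perform access_ok() before calling
 * them; the __force casts merely drop the __user sparse annotation.
 */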
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

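/*
 * Informational note (not from the original tree): the nocache
 * variant uses non-temporal stores so large copies do not evict hot
 * cache lines; zerorest selects whether the uncopied tail of dst is
 * zeroed on a fault.  The flushcache variant additionally ensures the
 * destination is flushed out of the cache, for persistent-memory
 * targets.
 */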
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
#endif /* _ASM_X86_UACCESS_64_H */
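
For context, a minimal sketch of how the generic copy_from_user() path
layers on raw_copy_from_user() above. This mirrors the mainline
lib/usercopy.c logic in simplified form and is not part of this header;
access_ok() and the zeroing of the uncopied tail are assumed to follow
mainline semantics.

	static inline unsigned long
	copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;	/* bytes not yet copied */

		if (likely(access_ok(from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))	/* partial copy: zero the tail so no */
			memset(to + (n - res), 0, res);	/* stale kernel data leaks */
		return res;		/* 0 on success */
	}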