cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uaccess.h (21158B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(__access_ok(addr, size));				\
})

#include <asm-generic/access_ok.h>
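
/*
 * Illustrative usage sketch, not part of the original header: access_ok()
 * only range-checks the pointer, so callers still have to handle -EFAULT
 * from the access itself, e.g.
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	...__get_user()/__put_user() or unsafe_*() accesses to ubuf...
 *
 * The fault handling comes from the exception-table entries emitted by the
 * accessors defined below.
 */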

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
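
/*
 * Illustrative note, not part of the original header: __inttype() picks the
 * narrowest unsigned type whose size can hold the expression, e.g. a one-byte
 * source uses unsigned char while an eight-byte source lands in an eight-byte
 * unsigned type, which is what the 64-bit get_user() paths below rely on.
 */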

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
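
/*
 * Illustrative note, not part of the original header: a call such as
 *	err = get_user(val, uptr);
 * with a 4-byte *uptr becomes "call __get_user_4", with the pointer passed
 * in %eax/%rax, the error code returned in that same register and the
 * zero-extended value returned in %edx/%rdx, which is why the helpers are
 * declared above as bare "extern int __get_user_N(void)" rather than with a
 * usable C prototype.
 */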

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
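
/*
 * Illustrative usage sketch, not part of the original header; the function
 * and variable names are made up for the example.
 */
static inline int example_read_u32_from_user(const u32 __user *uptr, u32 *out)
{
	u32 val;

	/* get_user() returns 0 on success and -EFAULT on a faulting access */
	if (get_user(val, uptr))
		return -EFAULT;
	*out = val;
	return 0;
}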

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
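
/*
 * Illustrative usage sketch, not part of the original header: __get_user()
 * skips the access_ok() check, so the caller provides it (the names below
 * are made up for the example).
 */
static inline int example_get_u32_nocheck(const u32 __user *uptr, u32 *out)
{
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;
	return __get_user(*out, uptr);
}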


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__chk_user_ptr(ptr);						\
	__ptr_pu = (ptr);						\
	__val_pu = (x);							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	__builtin_expect(__ret_pu, 0);					\
})
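
/*
 * Illustrative note, not part of the original header: a call such as
 *	err = put_user(val, uptr);
 * with a 4-byte *uptr becomes "call __put_user_4", with the pointer in
 * %ecx/%rcx, the value in %eax/%rax, the error code returned in %ecx/%rcx
 * and %ebx clobbered, matching the "strange magic calling convention"
 * described above.
 */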

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
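
/*
 * Illustrative usage sketch, not part of the original header; the function
 * name is made up for the example.
 */
static inline int example_write_u32_to_user(u32 __user *uptr, u32 val)
{
	/* put_user() returns 0 on success and -EFAULT on a faulting access */
	return put_user(val, uptr);
}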

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)
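
/*
 * Illustrative usage sketch, not part of the original header: as with
 * __get_user(), the caller performs the access_ok() check itself, e.g.
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	return __put_user(val, uptr);
 */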

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;		\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG |	\
					   EX_FLAG_CLEAR_AX_DX,		\
					   %[errout])			\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "0" (retval));					\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w");			\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l");			\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype)				\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
					   EX_FLAG_CLEAR_AX,		\
					   %[errout])			\
		     : [errout] "=r" (err),				\
		       [output] "=a" (x)				\
		     : [umem] "m" (__m(addr)),				\
		       "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})
#endif // CONFIG_X86_32
#else  // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	int __err = 0;							\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z)						\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[errout])			\
		     : CC_OUT(z) (success),				\
		       [errout] "+r" (__err),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory");					\
	if (unlikely(__err))						\
		goto label;						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI.  If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	int __result;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     "mov $0, %%ecx\n\t"				\
		     "setz %%cl\n"					\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
		     : [result]"=c" (__result),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory", "cc");					\
	if (unlikely(__result < 0))					\
		goto label;						\
	if (unlikely(!__result))					\
		*_old = __old;						\
	likely(__result);					})
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
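
/*
 * Illustrative usage sketch, not part of the original header: the unsafe
 * accessors are only valid between user_access_begin() and
 * user_access_end(), with faults routed to a local label (the names below
 * are made up for the example).
 */
static inline int example_bump_user_counter(u32 __user *uptr)
{
	u32 val;

	if (!user_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_get_user(val, uptr, Efault);
	unsafe_put_user(val + 1, uptr, Efault);
	user_access_end();
	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}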

extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
	bool __ret;								\
	__chk_user_ptr(_ptr);							\
	switch (sizeof(*(_ptr))) {						\
	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
					       (__force u8 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
					       (__force u16 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
					       (__force u32 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
						 (_nval), _label);		\
		break;								\
	default: __try_cmpxchg_user_wrong_size();				\
	}									\
	__ret;						})

/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
	int __ret = -EFAULT;						\
	__uaccess_begin_nospec();					\
	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
_label:									\
	__uaccess_end();						\
	__ret;								\
							})
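
/*
 * Illustrative usage sketch, not part of the original header (the names are
 * made up): the caller supplies a unique label name that the macro itself
 * defines, e.g.
 *
 *	u32 old = expected;
 *	int ret = __try_cmpxchg_user(uptr, &old, new_val, cmpxchg_fault);
 *
 * ret is 0 if the exchange happened, 1 if *uptr did not match (old then
 * holds the current value), and -EFAULT if the user access faulted.
 */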

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
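
/*
 * Illustrative usage sketch, not part of the original header: like the other
 * unsafe_*() helpers, unsafe_copy_to_user() must run inside a
 * user_access_begin()/user_access_end() section (the names below are made up
 * for the example).
 */
static inline int example_copy_to_user_unsafe(void __user *udst,
					      const void *ksrc, size_t len)
{
	if (!user_access_begin(udst, len))
		return -EFAULT;
	unsafe_copy_to_user(udst, ksrc, len, Efault);
	user_access_end();
	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}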

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)

#endif /* _ASM_X86_UACCESS_H */