cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uaccess_64.h (7862B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>

#include <asm/processor.h>
#include <asm-generic/access_ok.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 */
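
/*
 * Note (derived from the code below, not part of the upstream comment):
 * the user-space accessors use the ASI-tagged load/store forms built by
 * string pasting (e.g. "ld"#size "a ... %asi" -> lduwa, "st"#size "a"
 * -> stwa), so they go through the secondary ASI described above, while
 * the __*_kernel_* variants use plain "ld"/"st" and address the
 * kernel's own VM map directly.
 */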

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
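
/*
 * Illustrative sketch (hypothetical call site, not from this file):
 * a caller rejects a user range before dereferencing it, e.g.
 *
 *	if (__range_not_ok(ubuf, len, TASK_SIZE))
 *		return -EFAULT;
 *
 * The unsigned wrap check above (addr += size; addr < size) catches
 * ranges that would overflow past the top of the address space.
 */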

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
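
/*
 * Usage sketch (hypothetical call site, not part of this header):
 * get_user() and put_user() evaluate to 0 on success or -EFAULT on a
 * faulting access, and the fetched value is written through the first
 * argument:
 *
 *	int val;
 *	if (get_user(val, (int __user *)uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)uptr))
 *		return -EFAULT;
 */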

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_kernel_nofault(dst, src, type, label)			\
do {									\
	type *addr = (type __force *)(dst);				\
	type data = *(type *)src;					\
	register int __pu_ret;						\
	switch (sizeof(type)) {						\
	case 1: __put_kernel_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_kernel_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_kernel_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_kernel_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	if (__pu_ret)							\
		goto label;						\
} while (0)
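
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * __put_kernel_nofault() branches to the supplied label on a fault
 * instead of returning an error code, e.g.
 *
 *	__put_kernel_nofault(dst_ptr, &val, u32, Efault);
 *	return 0;
 * Efault:
 *	return -EFAULT;
 */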

#define __put_kernel_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put kernel asm, inline. */\n"			\
	"1:\t"	"st"#size " %1, [%2]\n\t"				\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

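/*
 * How the fixup in these asm blocks works (explanatory note derived
 * from the asm above): the __ex_table entry pairs the possibly-faulting
 * access at label 1 with the fixup code at label 3.  If the access
 * faults, the fault handler redirects execution to 3, which places
 * -EFAULT in the return operand and jumps back to label 2, just past
 * the access.  On the non-faulting path, "clr %0" yields a return
 * value of 0.
 */
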
#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	}							\
	__pu_ret;						\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_kernel_nofault(dst, src, type, label)			     \
do {									     \
	type *addr = (type __force *)(src);				     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (sizeof(type)) {						     \
		case 1: __get_kernel_asm(__gu_val, ub, addr, __gu_ret); break; \
		case 2: __get_kernel_asm(__gu_val, uh, addr, __gu_ret); break; \
		case 4: __get_kernel_asm(__gu_val, uw, addr, __gu_ret); break; \
		case 8: __get_kernel_asm(__gu_val, x, addr, __gu_ret); break;  \
		default:						     \
			__gu_val = 0;					     \
			__gu_ret = __get_user_bad();			     \
			break;						     \
	}								     \
	if (__gu_ret)							     \
		goto label;						     \
	*(type *)dst = (__force type) __gu_val;				     \
} while (0)
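
/*
 * Matching sketch for the read side (hypothetical caller): the fetched
 * value is stored through the first argument, and a faulting load
 * branches to the label, e.g.
 *
 *	u32 val;
 *	__get_kernel_nofault(&val, src_ptr, u32, Efault);
 *	return 0;
 * Efault:
 *	return -EFAULT;
 */
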
#define __get_kernel_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get kernel asm, inline. */\n"			\
	"1:\t"	"ld"#size " [%2], %1\n\t"				\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

#define __get_user_nocheck(data, addr, size, type) ({			     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (size) {							     \
		case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
		case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
		case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
		case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
		default:						     \
			__gu_val = 0;					     \
			__gu_ret = __get_user_bad();			     \
			break;						     \
	}								     \
	data = (__force type) __gu_val;					     \
	 __gu_ret;							     \
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
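
/*
 * Sketch of typical use via the generic wrappers (hypothetical call
 * site, not part of this header): copy_from_user()/copy_to_user()
 * call down into these raw_copy_*() backends, which return the number
 * of bytes that could not be copied, so 0 means success:
 *
 *	struct foo kbuf;
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */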

unsigned long __must_check raw_copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */