cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uaccess.h (9646B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/asm-extable.h>
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
#include <asm/facility.h>
#include <asm-generic/access_ok.h>

void debug_user_asce(int exit);

unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif

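/*
 * Editor's note, illustrative only (not part of the original header): the
 * generic copy_from_user()/copy_to_user() wrappers in <linux/uaccess.h> are
 * built on the raw routines above and are inlined here unless KASAN is
 * enabled. A typical caller, with hypothetical names:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */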
unsigned long __must_check
_copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_from_user_key(void *to, const void __user *from, unsigned long n, unsigned long key)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user_key(to, from, n, key);
	return n;
}

unsigned long __must_check
_copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key);

static __always_inline unsigned long __must_check
copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned long key)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user_key(to, from, n, key);
	return n;
}
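/*
 * Editor's note, illustrative only: the *_key variants behave like the plain
 * copies but perform the user-space access with the given storage access key,
 * so storage-key protection is checked; they are used e.g. by KVM guest
 * access paths. A hypothetical caller:
 *
 *	if (copy_from_user_key(buf, uaddr, len, access_key))
 *		return -EFAULT;
 */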

union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac1;
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;
	};
};

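/*
 * Editor's note, illustrative only: union oac above encodes the
 * operand-access-control word that MVCOS (MOVE WITH OPTIONAL SPECIFICATIONS)
 * expects in general register 0. oac1 describes the first (destination)
 * operand and oac2 the second (source) operand, each with an access key, an
 * address-space control ("as") and the "k"/"a" bits that say whether the key
 * and the address-space control are to be applied. __put_user_asm() and
 * __get_user_asm() below load this value into register 0 ("lr 0,%[spec]")
 * right before the MVCOS instruction.
 */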
int __noreturn __put_user_bad(void);

#define __put_user_asm(to, from, size)					\
({									\
	union oac __oac_spec = {					\
		.oac1.as = PSW_BITS_AS_SECONDARY,			\
		.oac1.a = 1,						\
	};								\
	int __rc;							\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	%[_to],%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])			\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])			\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (__oac_spec.val)				\
		: "cc", "0");						\
	__rc;								\
})

static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __put_user_asm((unsigned char __user *)ptr,
				    (unsigned char *)x,
				    size);
		break;
	case 2:
		rc = __put_user_asm((unsigned short __user *)ptr,
				    (unsigned short *)x,
				    size);
		break;
	case 4:
		rc = __put_user_asm((unsigned int __user *)ptr,
				    (unsigned int *)x,
				    size);
		break;
	case 8:
		rc = __put_user_asm((unsigned long __user *)ptr,
				    (unsigned long *)x,
				    size);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}

int __noreturn __get_user_bad(void);

#define __get_user_asm(to, from, size)					\
({									\
	union oac __oac_spec = {					\
		.oac2.as = PSW_BITS_AS_SECONDARY,			\
		.oac2.a = 1,						\
	};								\
	int __rc;							\
									\
	asm volatile(							\
		"	lr	0,%[spec]\n"				\
		"0:	mvcos	0(%[_to]),%[_from],%[_size]\n"		\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_LOAD_MEM(0b, 2b, %[rc], %[_to], %[_ksize])	\
		EX_TABLE_UA_LOAD_MEM(1b, 2b, %[rc], %[_to], %[_ksize])	\
		: [rc] "=&d" (__rc), "=Q" (*(to))			\
		: [_size] "d" (size), [_from] "Q" (*(from)),		\
		  [spec] "d" (__oac_spec.val), [_to] "a" (to),		\
		  [_ksize] "K" (size)					\
		: "cc", "0");						\
	__rc;								\
})

static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	int rc;

	switch (size) {
	case 1:
		rc = __get_user_asm((unsigned char *)x,
				    (unsigned char __user *)ptr,
				    size);
		break;
	case 2:
		rc = __get_user_asm((unsigned short *)x,
				    (unsigned short __user *)ptr,
				    size);
		break;
	case 4:
		rc = __get_user_asm((unsigned int *)x,
				    (unsigned int __user *)ptr,
				    size);
		break;
	case 8:
		rc = __get_user_asm((unsigned long *)x,
				    (unsigned long __user *)ptr,
				    size);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__pu_err, 0);					\
})

#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})

#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
									\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	case 8: {							\
		unsigned long __x;					\
									\
		__gu_err = __get_user_fn(&__x, ptr, sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *)&__x;		\
		break;							\
	};								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__gu_err, 0);					\
})

#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})
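/*
 * Editor's note, illustrative only: the access size is derived from the
 * pointer type, and both macros evaluate to 0 on success or -EFAULT on
 * fault. A typical caller (names are hypothetical):
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */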

/*
 * Copy a null terminated string from userspace.
 */
long __must_check strncpy_from_user(char *dst, const char __user *src, long count);

long __must_check strnlen_user(const char __user *src, long count);
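/*
 * Editor's note, illustrative only: strncpy_from_user() returns the string
 * length on success, count if the buffer was exhausted before a terminating
 * NUL was found, or -EFAULT on fault. A hypothetical caller:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */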

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
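/*
 * Editor's note, illustrative only: like the copy routines, clear_user()
 * returns the number of bytes that could not be cleared (0 on success),
 * e.g. when zeroing the unused tail of a user buffer:
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */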

int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count);
void *s390_kernel_write(void *dst, const void *src, size_t size);

int __noreturn __put_kernel_bad(void);

#define __put_kernel_asm(val, to, insn)					\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %[_val],%[_to]\n"			\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_STORE(0b, 2b, %[rc])			\
		EX_TABLE_UA_STORE(1b, 2b, %[rc])			\
		: [rc] "=d" (__rc), [_to] "+Q" (*(to))			\
		: [_val] "d" (val)					\
		: "cc");						\
	__rc;								\
})

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	unsigned long __x = (unsigned long)(*((type *)(src)));		\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
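/*
 * Editor's note, illustrative only: the *_kernel_nofault helpers branch to
 * the supplied error label instead of returning an error code; the generic
 * copy_to_kernel_nofault()/copy_from_kernel_nofault() helpers are built on
 * top of them. A hypothetical direct user of the store variant:
 *
 *	long probe_store(unsigned long *dst, unsigned long val)
 *	{
 *		__put_kernel_nofault(dst, &val, unsigned long, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */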

int __noreturn __get_kernel_bad(void);

#define __get_kernel_asm(val, from, insn)				\
({									\
	int __rc;							\
									\
	asm volatile(							\
		"0:   " insn "  %[_val],%[_from]\n"			\
		"1:	xr	%[rc],%[rc]\n"				\
		"2:\n"							\
		EX_TABLE_UA_LOAD_REG(0b, 2b, %[rc], %[_val])		\
		EX_TABLE_UA_LOAD_REG(1b, 2b, %[rc], %[_val])		\
		: [rc] "=d" (__rc), [_val] "=d" (val)			\
		: [_from] "Q" (*(from))					\
		: "cc");						\
	__rc;								\
})

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		unsigned char __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		unsigned long __x;					\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
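/*
 * Editor's note, illustrative only: the load variant mirrors the store
 * variant above, e.g.:
 *
 *	unsigned long val;
 *
 *	__get_kernel_nofault(&val, src, unsigned long, Efault);
 */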

#endif /* __S390_UACCESS_H */