uaccess.h (6225B)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>

#include <linux/bug.h>
#include <linux/string.h>

#define TASK_SIZE_MAX DEFAULT_TASK_SIZE
#include <asm/pgtable.h>
#include <asm-generic/access_ok.h>

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_USER(sr, val, ptr)	__get_user_asm64(sr, val, ptr)
#define STD_USER(sr, x, ptr)	__put_user_asm64(sr, x, ptr)
#else
#define LDD_USER(sr, val, ptr)	__get_user_asm(sr, val, "ldd", ptr)
#define STD_USER(sr, x, ptr)	__put_user_asm(sr, "std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on 64bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r29 for a read or write fault, and zeroes the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_REG	29
#define ASM_EXCEPTIONTABLE_VAR(__variable)		\
	register long __variable __asm__ ("r29") = 0
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)

#define __get_user_internal(sr, val, ptr)		\
({							\
	ASM_EXCEPTIONTABLE_VAR(__gu_err);		\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
	case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
	case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
	case 8: LDD_USER(sr, val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	__get_user_internal(SR_USER, val, ptr);		\
})

#define __get_user_asm(sr, val, ldx, ptr)		\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr%2,%3),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "+r"(__gu_err)	\
		: "i"(sr), "r"(ptr));			\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#define __get_kernel_nofault(dst, src, type, err_label)	\
{							\
	type __z;					\
	long __err;					\
	__err = __get_user_internal(SR_KERNEL, __z, (type *)(src)); \
	if (unlikely(__err))				\
		goto err_label;				\
	else						\
		*(type *)(dst) = __z;			\
}


#if !defined(CONFIG_64BIT)

#define __get_user_asm64(sr, val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr%2,%3),%0\n"		\
		"2: ldw 4(%%sr%2,%3),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "+r"(__gu_err)	\
		: "i"(sr), "r"(ptr));			\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */


#define __put_user_internal(sr, x, ptr)				\
({								\
	ASM_EXCEPTIONTABLE_VAR(__pu_err);			\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm(sr, "stb", x, ptr); break;	\
	case 2: __put_user_asm(sr, "sth", x, ptr); break;	\
	case 4: __put_user_asm(sr, "stw", x, ptr); break;	\
	case 8: STD_USER(sr, x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})

#define __put_user(x, ptr)					\
({								\
	__typeof__(&*(ptr)) __ptr = ptr;			\
	__typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x);	\
	__put_user_internal(SR_USER, __x, __ptr);		\
})

#define __put_kernel_nofault(dst, src, type, err_label)		\
{								\
	type __z = *(type *)(src);				\
	long __err;						\
	__err = __put_user_internal(SR_KERNEL, __z, (type *)(dst)); \
	if (unlikely(__err))					\
		goto err_label;					\
}


/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * The register holding the possible EFAULT error (ASM_EXCEPTIONTABLE_REG)
 * is already listed as input and output register.
 */

#define __put_user_asm(sr, stx, x, ptr)				\
	__asm__ __volatile__ (					\
		"1: " stx " %1,0(%%sr%2,%3)\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
		: "+r"(__pu_err)				\
		: "r"(x), "i"(sr), "r"(ptr))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(sr, __val, ptr) do {			\
	__asm__ __volatile__ (					\
		"1: stw %1,0(%%sr%2,%3)\n"			\
		"2: stw %R1,4(%%sr%2,%3)\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
		: "+r"(__pu_err)				\
		: "r"(__val), "i"(sr), "r"(ptr));		\
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern __must_check unsigned lclear_user(void __user *, unsigned long);
extern __must_check long strnlen_user(const char __user *src, long n);

/*
 * Complex access routines -- macros
 */

#define clear_user lclear_user
#define __clear_user lclear_user

unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */
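
Usage note (not part of the header above): a minimal sketch of how a caller typically uses the get_user()/put_user() macros defined here. On parisc they expand to __get_user()/__put_user(), which evaluate to 0 on success or -EFAULT when the exception-table fixup fires, and a faulting get_user() zeroes the destination. The ioctl-style handler and its names below are hypothetical and only illustrate the calling convention.

/* Hypothetical example; not part of arch/parisc/include/asm/uaccess.h. */
static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	u32 __user *uptr = (u32 __user *)arg;	/* user pointer passed in arg */
	u32 val;

	if (get_user(val, uptr))	/* read from user space; -EFAULT on fault */
		return -EFAULT;

	val |= 0x1;			/* arbitrary transformation for the example */

	if (put_user(val, uptr))	/* write back to user space; 0 on success */
		return -EFAULT;

	return 0;
}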