cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uaccess.h (12944B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/asm-extable.h>
#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

static inline int __access_ok(const void __user *ptr, unsigned long size);

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline int access_ok(const void __user *addr, unsigned long size)
{
	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	return likely(__access_ok(addr, size));
}
#define access_ok access_ok

#include <asm-generic/access_ok.h>
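
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * how access_ok() is typically used to validate a user range before any raw
 * access. Guarded out so it is never compiled.
 */
#if 0
static int example_check_user_range(const void __user *ubuf, size_t len)
{
	/* Rejects ranges that extend beyond TASK_SIZE_MAX (or wrap). */
	if (!access_ok(ubuf, len))
		return -EFAULT;

	/* The pointer is range-checked but still untrusted user memory. */
	return 0;
}
#endif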

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

/*
 * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
 * affects EL0 and TCF affects EL1 irrespective of which TTBR is
 * used.
 * The kernel usually accesses TTBR0 with LDTR/STTR instructions
 * when UAO is available, so these act as EL0 accesses using
 * TCF0.
 * However, the futex.h code uses exclusives, which are executed
 * at EL1; this can potentially cause a tag check fault even if the
 * user disables TCF0.
 *
 * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
 * and reset it in uaccess_disable().
 *
 * The Tag Check Override (TCO) bit temporarily disables tag checking,
 * preventing the issue.
 */
static inline void __uaccess_disable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

static inline void __uaccess_enable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

/*
 * These functions disable tag checking only if in MTE async mode,
 * since the sync mode generates exceptions synchronously and the
 * nofault or load_unaligned_zeropad fixups can handle them.
 */
static inline void __uaccess_disable_tco_async(void)
{
	if (system_uses_mte_async_or_asymm_mode())
		 __uaccess_disable_tco();
}

static inline void __uaccess_enable_tco_async(void)
{
	if (system_uses_mte_async_or_asymm_mode())
		__uaccess_enable_tco();
}

static inline void uaccess_disable_privileged(void)
{
	__uaccess_disable_tco();

	if (uaccess_ttbr0_disable())
		return;

	__uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
	__uaccess_enable_tco();

	if (uaccess_ttbr0_enable())
		return;

	__uaccess_disable_hw_pan();
}
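
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * uaccess_enable_privileged()/uaccess_disable_privileged() bracket privileged
 * user accesses such as the exclusive-based futex operations mentioned above.
 * Guarded out so it is never compiled.
 */
#if 0
static int example_privileged_user_access(u32 __user *uaddr)
{
	int err = 0;

	uaccess_enable_privileged();	/* set TCO, switch TTBR0 or clear PAN */
	/* ... exclusive load/store to *uaddr with exception table fixups ... */
	uaccess_disable_privileged();	/* reset TCO, restore TTBR0/PAN */

	return err;
}
#endif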

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the maximum
 * user address. In case the pointer is tagged (has the top byte set), untag
 * the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}
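
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * the intended semantics of the masking above. An out-of-range (potentially
 * attacker-controlled) pointer is forced to NULL before any dereference, even
 * under speculation, thanks to the csdb() barrier. Guarded out so it is never
 * compiled.
 */
#if 0
static void example_mask_ptr_semantics(void __user *p)
{
	void __user *q = uaccess_mask_ptr(p);

	/*
	 * q == p    when untagged_addr(p) <= TASK_SIZE_MAX - 1,
	 * q == NULL otherwise.
	 */
	(void)q;
}
#endif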

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_mem_asm(load, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" load "	" reg "1, [%2]\n"			\
	"2:\n"								\
	_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1)			\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr))

#define __raw_get_mem(ldr, x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__get_mem_asm(ldr, "%x",  __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_get_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
	__typeof__(x) __rgu_val;					\
	__chk_user_ptr(ptr);						\
									\
	uaccess_ttbr0_enable();						\
	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);		\
	uaccess_ttbr0_disable();					\
									\
	(x) = __rgu_val;						\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
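
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * typical get_user() usage from a syscall or ioctl path. Guarded out so it is
 * never compiled.
 */
#if 0
static long example_read_user_u32(u32 __user *uptr, u32 *out)
{
	u32 val;

	/* Checks access_ok(), masks the pointer and loads via LDTR. */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}
#endif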

/*
 * We must not call into the scheduler between __uaccess_enable_tco_async() and
 * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __gkn_dst = (dst);				\
	__typeof__(src) __gkn_src = (src);				\
	int __gkn_err = 0;						\
									\
	__uaccess_enable_tco_async();					\
	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
		      (__force type *)(__gkn_src), __gkn_err);		\
	__uaccess_disable_tco_async();					\
									\
	if (unlikely(__gkn_err))					\
		goto err_label;						\
} while (0)
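
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * __get_kernel_nofault() reads a possibly-invalid *kernel* address and jumps
 * to the given label on fault, as the generic copy_from_kernel_nofault()
 * helpers do. Guarded out so it is never compiled.
 */
#if 0
static long example_peek_kernel_long(const unsigned long *addr,
				     unsigned long *val)
{
	__get_kernel_nofault(val, addr, unsigned long, efault);
	return 0;
efault:
	return -EFAULT;
}
#endif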

#define __put_mem_asm(store, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" store "	" reg "1, [%2]\n"			\
	"2:\n"								\
	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)				\
	: "+r" (err)							\
	: "r" (x), "r" (addr))

#define __raw_put_mem(str, x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_mem_asm(str, "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__put_mem_asm(str, "%x", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
	__typeof__(*(ptr)) __rpu_val = (x);				\
	__chk_user_ptr(__rpu_ptr);					\
									\
	uaccess_ttbr0_enable();						\
	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);		\
	uaccess_ttbr0_disable();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else	{							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
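
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * typical put_user() usage when returning a single value to userspace.
 * Guarded out so it is never compiled.
 */
#if 0
static long example_write_user_u64(u64 __user *uptr, u64 val)
{
	/* Checks access_ok(), masks the pointer and stores via STTR. */
	if (put_user(val, uptr))
		return -EFAULT;

	return 0;
}
#endif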

/*
 * We must not call into the scheduler between __uaccess_enable_tco_async() and
 * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __pkn_dst = (dst);				\
	__typeof__(src) __pkn_src = (src);				\
	int __pkn_err = 0;						\
									\
	__uaccess_enable_tco_async();					\
	__raw_put_mem("str", *((type *)(__pkn_src)),			\
		      (__force type *)(__pkn_dst), __pkn_err);		\
	__uaccess_disable_tco_async();					\
									\
	if (unlikely(__pkn_err))					\
		goto err_label;						\
} while(0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_ttbr0_enable();						\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_ttbr0_disable();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_ttbr0_enable();						\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				    (from), (n));			\
	uaccess_ttbr0_disable();					\
	__actu_ret;							\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
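
/*
 * Illustrative note (not part of the original header; helper name assumed):
 * raw_copy_{from,to}_user() are the arch primitives; most callers use
 * copy_from_user()/copy_to_user() from <linux/uaccess.h>, which add the
 * access_ok() check and, with INLINE_COPY_{FROM,TO}_USER defined, are inlined.
 * Guarded out so it is never compiled.
 */
#if 0
static long example_copy_request(void __user *ubuf)
{
	struct { u32 cmd; u32 arg; } req;

	if (copy_from_user(&req, ubuf, sizeof(req)))
		return -EFAULT;

	/* ... act on req ... */
	return 0;
}
#endif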

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_ttbr0_enable();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_ttbr0_disable();
	}
	return n;
}
#define clear_user	__clear_user
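
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * clear_user() zeroes a user range and returns the number of bytes it could
 * not clear, e.g. for padding the tail of a partially filled buffer. Guarded
 * out so it is never compiled.
 */
#if 0
static long example_zero_tail(void __user *ubuf, size_t filled, size_t total)
{
	if (filled < total &&
	    clear_user((char __user *)ubuf + filled, total - filled))
		return -EFAULT;

	return 0;
}
#endif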

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif
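
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * __copy_from_user_flushcache() is meant for persistent-memory style write
 * paths where the copied destination must also be flushed towards the point
 * of persistence. It returns non-zero if any bytes could not be copied.
 * Guarded out so it is never compiled.
 */
#if 0
static int example_pmem_write(void *pmem_dst, const void __user *ubuf,
			      unsigned int len)
{
	if (__copy_from_user_flushcache(pmem_dst, ubuf, len))
		return -EFAULT;

	return 0;
}
#endif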

#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/*
 * Return 0 on success, the number of bytes not probed otherwise.
 */
static inline size_t probe_subpage_writeable(const char __user *uaddr,
					     size_t size)
{
	if (!system_supports_mte())
		return 0;
	return mte_probe_user_range(uaddr, size);
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
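
/*
 * Illustrative sketch (not part of the original header; helper name assumed):
 * probe_subpage_writeable() returns 0 when every byte of the range could be
 * probed, letting callers detect MTE tag mismatches at sub-page granularity.
 * Guarded out so it is never compiled.
 */
#if 0
static bool example_range_fully_writeable(const char __user *uaddr, size_t len)
{
	return probe_subpage_writeable(uaddr, len) == 0;
}
#endif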

#endif /* __ASM_UACCESS_H */