cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

futex.h (2469B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2/*
      3 * Copyright (c) 2006  Ralf Baechle (ralf@linux-mips.org)
      4 * Copyright (c) 2018  Jim Wilson (jimw@sifive.com)
      5 */
      6
      7#ifndef _ASM_RISCV_FUTEX_H
      8#define _ASM_RISCV_FUTEX_H
      9
     10#include <linux/futex.h>
     11#include <linux/uaccess.h>
     12#include <linux/errno.h>
     13#include <asm/asm.h>
     14#include <asm/asm-extable.h>
     15
/*
 * We don't even really need the extable code, but for now keep it simple.
 *
 * Without an MMU there is no user/kernel address-space separation to
 * toggle, so the user-access bracketing helpers collapse to no-ops.
 * (On CONFIG_MMU builds the real definitions presumably come from the
 * uaccess headers pulled in above — TODO confirm.)
 */
#ifndef CONFIG_MMU
#define __enable_user_access()		do { } while (0)
#define __disable_user_access()		do { } while (0)
#endif
     21
/*
 * Run a single user-memory AMO instruction, supplied as the asm template
 * string @insn.  The template must use the named operands %[ov] (old
 * value out), %[op] (operand in) and %[u] (the user word).
 *
 * On a user-access fault, the exception-table entry resumes execution at
 * label 2 with %[r] (@ret) rewritten to -EFAULT; on success @ret is left
 * untouched, so callers must pre-initialize it to 0.
 *
 * @ret:    int lvalue; 0 kept on success, -EFAULT written on fault
 * @oldval: int lvalue receiving the previous value of *@uaddr
 * @uaddr:  user pointer; caller is responsible for access_ok()
 * @oparg:  AMO operand; "Jr" + %z emits x0 when the value is 0
 *
 * NOTE: classic multi-evaluation macro — arguments are lvalues used as
 * asm operands, so pass plain variables, not expressions with effects.
 */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
{								\
	__enable_user_access();					\
	__asm__ __volatile__ (					\
	"1:	" insn "				\n"	\
	"2:						\n"	\
	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r])			\
	: [r] "+r" (ret), [ov] "=&r" (oldval),			\
	  [u] "+m" (*uaddr)					\
	: [op] "Jr" (oparg)					\
	: "memory");						\
	__disable_user_access();				\
}
     35
     36static inline int
     37arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
     38{
     39	int oldval = 0, ret = 0;
     40
     41	if (!access_ok(uaddr, sizeof(u32)))
     42		return -EFAULT;
     43
     44	switch (op) {
     45	case FUTEX_OP_SET:
     46		__futex_atomic_op("amoswap.w.aqrl %[ov],%z[op],%[u]",
     47				  ret, oldval, uaddr, oparg);
     48		break;
     49	case FUTEX_OP_ADD:
     50		__futex_atomic_op("amoadd.w.aqrl %[ov],%z[op],%[u]",
     51				  ret, oldval, uaddr, oparg);
     52		break;
     53	case FUTEX_OP_OR:
     54		__futex_atomic_op("amoor.w.aqrl %[ov],%z[op],%[u]",
     55				  ret, oldval, uaddr, oparg);
     56		break;
     57	case FUTEX_OP_ANDN:
     58		__futex_atomic_op("amoand.w.aqrl %[ov],%z[op],%[u]",
     59				  ret, oldval, uaddr, ~oparg);
     60		break;
     61	case FUTEX_OP_XOR:
     62		__futex_atomic_op("amoxor.w.aqrl %[ov],%z[op],%[u]",
     63				  ret, oldval, uaddr, oparg);
     64		break;
     65	default:
     66		ret = -ENOSYS;
     67	}
     68
     69	if (!ret)
     70		*oval = oldval;
     71
     72	return ret;
     73}
     74
/*
 * Atomically compare-and-exchange the user futex word at @uaddr:
 * if *uaddr == @oldval, store @newval; in all cases report the value
 * read through *@uval.
 *
 * Returns 0 on success, -EFAULT on a bad address or an access fault.
 * A fault in either the LR (label 1) or the SC (label 2) is redirected
 * by the exception-table entries to label 3 with %[r] (ret) rewritten
 * to -EFAULT.  The LR/SC pair retries until the SC succeeds or the
 * comparison fails; the .aqrl suffix requests full acquire+release
 * ordering on both halves.
 */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	int ret = 0;
	u32 val;
	uintptr_t tmp;	/* SC status: 0 = stored, non-zero = retry */

	if (!access_ok(uaddr, sizeof(u32)))
		return -EFAULT;

	__enable_user_access();
	/* NOTE(review): the trailing backslashes on the extable lines are
	 * stray but harmless line continuations. */
	__asm__ __volatile__ (
	"1:	lr.w.aqrl %[v],%[u]			\n"
	"	bne %[v],%z[ov],3f			\n"
	"2:	sc.w.aqrl %[t],%z[nv],%[u]		\n"
	"	bnez %[t],1b				\n"
	"3:						\n"
		_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])	\
		_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])	\
	: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
	: [ov] "Jr" (oldval), [nv] "Jr" (newval)
	: "memory");
	__disable_user_access();

	/* Written even on fault (val may then be unread garbage); callers
	 * must ignore *uval whenever ret != 0. */
	*uval = val;
	return ret;
}
    103
    104#endif /* _ASM_RISCV_FUTEX_H */