cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sys_sparc_64.c (17415B)


// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/timex.h>
#include <linux/uaccess.h>

#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "kernel.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

SYSCALL_DEFINE0(getpagesize)
{
	return PAGE_SIZE;
}

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}
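
/* Illustrative note (added): the three checks above reject, in order,
 * (1) lengths so large that the range must overlap the VA hole no
 * matter where it starts, (2) ranges that wrap past the top of the
 * 64-bit address space (addr + len overflows), and (3) ranges whose
 * start or end falls inside [VA_EXCLUDE_START, VA_EXCLUDE_END).
 */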

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return base + off;
}
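
/* Illustrative note (added; the concrete numbers are hypothetical):
 * COLOR_ALIGN picks the lowest address >= addr that has the same
 * D-cache color as the file offset. E.g. with SHMLBA == 0x4000,
 * addr == 0x10123 and a pgoff whose color, (pgoff << PAGE_SHIFT) &
 * (SHMLBA - 1), is 0x2000: base rounds up to 0x14000 and the result
 * is 0x16000, so (result & (SHMLBA - 1)) matches the offset's color.
 */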

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
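
/* Illustrative note (added): for 64-bit tasks the search above can run
 * twice. The first vm_unmapped_area() call only looks below the VA
 * hole (high_limit clamped to VA_EXCLUDE_START); if it fails with
 * -ENOMEM ("addr & ~PAGE_MASK" detects the error, since any valid
 * result is page aligned), the window is moved above the hole
 * (VA_EXCLUDE_END..task_size) and the search is retried.
 */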

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/* Try to align the mapping as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);
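
/* Illustrative note (added; numbers are hypothetical): to get an
 * alignment stricter than what vm_unmapped_area() guarantees, the
 * loop above over-allocates and rounds up. E.g. for len == 6MB the
 * goal is 4MB: it asks for 6MB + (4MB - PAGE_SIZE), so any page-
 * aligned result contains a 4MB-aligned start for the 6MB mapping;
 * on failure it retries with 512K, then 64K, then plain PAGE_SIZE.
 */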

/* Essentially the same as PowerPC.  */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_long();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}
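
/* Illustrative note (added; assumes sparc64's 8KB pages, i.e.
 * PAGE_SHIFT == 13): 32-bit tasks draw rnd from 2^(23-13) == 1024
 * page-sized slots, randomizing mmap_base over an 8MB window, while
 * 64-bit tasks get 2^(30-13) == 131072 slots, i.e. a 1GB window.
 */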

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlim_stack->rlim_cur;
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
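
/* Illustrative note (added): for the top-down layout the stack gap is
 * clamped to [128MB, 5/6 of STACK_TOP32]; e.g. an 8MB RLIMIT_STACK is
 * raised to 128MB, so mmap_base lands at least 128MB (plus the random
 * factor) below the top of the 32-bit address space.
 */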

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE0(sparc_pipe)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	current_pt_regs()->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}
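
/* Illustrative note (added): SPARC's traditional pipe(2) ABI returns
 * both descriptors in registers rather than through a user pointer:
 * fd[0] comes back as the normal syscall return value and fd[1] is
 * stashed in the caller's second output register via
 * u_regs[UREG_I1], where the libc wrapper picks it up and stores
 * both values into the user's array.
 */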

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */

SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	if (!IS_ENABLED(CONFIG_SYSVIPC))
		return -ENOSYS;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMTIMEDOP) {
		switch (call) {
		case SEMOP:
			err = ksys_semtimedop(first, ptr,
					      (unsigned int)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = ksys_semtimedop(first, ptr, (unsigned int)second,
				(const struct __kernel_timespec __user *)
					      (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = ksys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = ksys_old_semctl(first, second,
					      (int)third | IPC_64,
					      (unsigned long) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = ksys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = ksys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = ksys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = ksys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}
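
/* Illustrative note (added): this demultiplexer relies on the call
 * numbers in <linux/ipc.h> being grouped by family (the SEM* calls
 * first, then MSG*, then SHM*), so the cascading "call <= SEMTIMEDOP
 * / MSGCTL / SHMCTL" range checks select a family before the switch
 * dispatches the individual call.
 */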

SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	long ret;

	if (personality(current->personality) == PER_LINUX32 &&
	    personality(personality) == PER_LINUX)
		personality |= PER_LINUX32;
	ret = sys_personality(personality);
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;

	return ret;
}
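
/* Illustrative note (added): a 32-bit task requesting PER_LINUX is
 * quietly kept at PER_LINUX32 so exec'd children remain in compat
 * mode; the previous personality returned to the caller has the
 * PER_LINUX32 bit stripped again, so such a task observes plain
 * PER_LINUX.
 */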

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}
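
/* Illustrative note (added; numbers are hypothetical, assuming 8KB
 * pages): the first check catches byte-offset wraparound, e.g.
 * off == 0xffffffffffffe000 with len == 0x4000 makes
 * off + PAGE_ALIGN(len) wrap to 0x2000, below off; the second
 * rejects offsets that are not page aligned before the byte offset
 * is converted to a page number for ksys_mmap_pgoff().
 */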

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

SYSCALL_DEFINE0(nis_syscall)
{
	static int count;
	struct pt_regs *regs = current_pt_regs();

	/* Don't make the system unusable if someone gets stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs(regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	exception_exit(prev_state);
}

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out_unlock;
	memcpy(tmp, utsname()->domainname, nlen);

	up_read(&uts_sem);

	if (copy_to_user(name, tmp, nlen))
		return -EFAULT;
	return 0;

out_unlock:
	up_read(&uts_sem);
	return err;
}
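
/* Illustrative note (added): the domain name is copied into a stack
 * buffer and uts_sem is dropped *before* copy_to_user(), so a fault
 * on the user buffer (which can sleep) never occurs while the uts
 * semaphore is held.
 */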

SYSCALL_DEFINE1(sparc_adjtimex, struct __kernel_timex __user *, txc_p)
{
	struct __kernel_timex txc;
	struct __kernel_old_timeval *tv = (void *)&txc.time;
	int ret;

	/* Copy the user-space data into the kernel copy of the
	 * structure, bearing in mind that the structure layout
	 * may change.
	 */
	if (copy_from_user(&txc, txc_p, sizeof(txc)))
		return -EFAULT;

	/*
	 * Override for the sparc64-specific timeval type: tv_usec
	 * is 32 bits wide there instead of the 64 bits used in
	 * __kernel_timex.
	 */
	txc.time.tv_usec = tv->tv_usec;
	ret = do_adjtimex(&txc);
	tv->tv_usec = txc.time.tv_usec;

	return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
}
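
/* Illustrative note (added): struct __kernel_timex carries a 64-bit
 * tv_usec, while the sparc64 user ABI for adjtimex() uses the old
 * timeval layout with a 32-bit tv_usec. The overlay pointer "tv"
 * re-reads the user's 32-bit field from the same storage, and the
 * assignments above widen it on entry and narrow it back before the
 * result is copied out, preserving the user-visible layout.
 */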

SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,
		struct __kernel_timex __user *, txc_p)
{
	struct __kernel_timex txc;
	struct __kernel_old_timeval *tv = (void *)&txc.time;
	int ret;

	if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
		pr_err_once("process %d (%s) attempted a POSIX timer syscall "
		    "while CONFIG_POSIX_TIMERS is not set\n",
		    current->pid, current->comm);

		return -ENOSYS;
	}

	/* Copy the user-space data into the kernel copy of the
	 * structure, bearing in mind that the structure layout
	 * may change.
	 */
	if (copy_from_user(&txc, txc_p, sizeof(txc)))
		return -EFAULT;

	/*
	 * Override for the sparc64-specific timeval type: tv_usec
	 * is 32 bits wide there instead of the 64 bits used in
	 * __kernel_timex.
	 */
	txc.time.tv_usec = tv->tv_usec;
	ret = do_clock_adjtime(which_clock, &txc);
	tv->tv_usec = txc.time.tv_usec;

	return copy_to_user(txc_p, &txc, sizeof(txc)) ? -EFAULT : ret;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
				GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
					      sizeof(long),
					      GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}
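
/* Illustrative note (added): utraps[0] is a reference count rather
 * than a handler slot. When the table is shared and a different
 * handler is being installed, the branch above makes a private copy
 * (copy-on-write): it drops one reference on the old table, points
 * this thread at a fresh table with count 1, and copies the handler
 * slots across before the update below.
 */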

SYSCALL_DEFINE1(memory_ordering, unsigned long, model)
{
	struct pt_regs *regs = current_pt_regs();

	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
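
/* Illustrative note (added): "model" selects the SPARC V9 memory
 * model in the TSTATE.MM field (a two-bit field placed at bit 14
 * here): 0 == TSO, 1 == PSO, 2 == RMO, which is why any value >= 3
 * is rejected.
 */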

SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}
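
/* Illustrative note (added): unlike the generic rt_sigaction, the
 * sparc variant takes an explicit "restorer" argument; it is saved
 * in ka_restorer and later used as the sigreturn trampoline when
 * the signal frame is set up.
 */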

SYSCALL_DEFINE0(kern_features)
{
	return KERN_FEATURE_MIXED_MODE_STACK;
}