cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

stub_64.h (2497B)


      1/*
      2 * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
      3 * Licensed under the GPL
      4 */
      5
      6#ifndef __SYSDEP_STUB_H
      7#define __SYSDEP_STUB_H
      8
      9#include <sysdep/ptrace_user.h>
     10#include <generated/asm-offsets.h>
     11
     12#define STUB_MMAP_NR __NR_mmap
     13#define MMAP_OFFSET(o) (o)
     14
     15#define __syscall_clobber "r11","rcx","memory"
     16#define __syscall "syscall"
     17
/*
 * Issue a Linux syscall with no arguments from stub context.
 *
 * The syscall number is placed in rax (constraint "0" ties it to the
 * "=a" output) and the kernel's return value is read back from rax.
 * rcx and r11 are in __syscall_clobber because the syscall instruction
 * overwrites them (return rip and rflags).
 */
static inline long stub_syscall0(long syscall)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall) : __syscall_clobber );

	return ret;
}
     28
/*
 * Issue a Linux syscall with two arguments from stub context.
 *
 * Per the x86-64 syscall convention: rax = syscall number ("0"/"=a"),
 * rdi = arg1 ("D"), rsi = arg2 ("S").  Returns the value the kernel
 * leaves in rax.
 */
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );

	return ret;
}
     39
/*
 * Issue a Linux syscall with three arguments from stub context.
 *
 * rax = syscall number, rdi = arg1 ("D"), rsi = arg2 ("S"),
 * rdx = arg3 ("d").  Returns the kernel's rax.
 */
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
	long ret;

	__asm__ volatile (__syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
		: __syscall_clobber );

	return ret;
}
     51
/*
 * Issue a Linux syscall with four arguments from stub context.
 *
 * The fourth syscall argument lives in r10, for which GCC has no
 * constraint letter, so it is passed via "g" (%5) and moved into r10
 * by hand; r10 is therefore added to the clobber list.  The rest
 * follow the usual convention: rdi, rsi, rdx.
 */
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
				 long arg4)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4)
		: __syscall_clobber, "r10" );

	return ret;
}
     65
/*
 * Issue a Linux syscall with five arguments from stub context.
 *
 * Arguments 4 and 5 go in r10 and r8, neither of which has a GCC
 * constraint letter, so both are passed via "g" (%5, %6) and moved
 * into place by hand; r10 and r8 join the clobber list.  rdi, rsi and
 * rdx carry the first three arguments as usual.
 */
static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
				 long arg4, long arg5)
{
	long ret;

	__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
		: "=a" (ret)
		: "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
		  "g" (arg4), "g" (arg5)
		: __syscall_clobber, "r10", "r8" );

	return ret;
}
     79
     80static inline void trap_myself(void)
     81{
     82	__asm("int3");
     83}
     84
/*
 * mmap() the stub's own stack page and then trap (int3).
 *
 * The non-constant mmap parameters (file descriptor and file offset)
 * are fetched from the stub page itself, at the UML_STUB_FIELD_FD and
 * UML_STUB_FIELD_OFFSET offsets; afterwards the syscall's return value
 * is stored back into the page at UML_STUB_FIELD_CHILD_ERR, and int3
 * returns control to the host.
 *
 * NOTE(review): everything is packed into a single asm statement,
 * presumably so no compiler-generated stack access can occur while the
 * stack page is being replaced — confirm against the stub build flags.
 */
static inline void remap_stack_and_trap(void)
{
	__asm__ volatile (
		"movq %0,%%rax ;"		/* rax = __NR_mmap */
		"movq %%rsp,%%rdi ;"		/* rdi = rsp ... */
		"andq %1,%%rdi ;"		/* ... masked to page base = mmap addr */
		"movq %2,%%r10 ;"		/* r10 (arg4) = MAP_FIXED | MAP_SHARED */
		/* r8 (arg5) = fd, loaded from the stub page */
		"movq %%rdi,%%r8 ; addq %3,%%r8 ; movq (%%r8),%%r8 ;"
		/* r9 (arg6) = file offset, loaded from the stub page */
		"movq %%rdi,%%r9 ; addq %4,%%r9 ; movq (%%r9),%%r9 ;"
		__syscall ";"
		/* recompute the page base and store the syscall result */
		"movq %%rsp,%%rdi ; andq %1,%%rdi ;"
		"addq %5,%%rdi ; movq %%rax, (%%rdi) ;"
		"int3"				/* trap back to the host */
		: :
		"g" (STUB_MMAP_NR),
		"g" (~(UM_KERN_PAGE_SIZE - 1)),
		"g" (MAP_FIXED | MAP_SHARED),
		"g" (UML_STUB_FIELD_FD),
		"g" (UML_STUB_FIELD_OFFSET),
		"g" (UML_STUB_FIELD_CHILD_ERR),
		"S" (UM_KERN_PAGE_SIZE),	/* rsi (arg2) = length */
		"d" (PROT_READ | PROT_WRITE)	/* rdx (arg3) = prot */
		:
		__syscall_clobber, "r10", "r8", "r9");
}
    110
    111static __always_inline void *get_stub_page(void)
    112{
    113	unsigned long ret;
    114
    115	asm volatile (
    116		"movq %%rsp,%0 ;"
    117		"andq %1,%0"
    118		: "=a" (ret)
    119		: "g" (~(UM_KERN_PAGE_SIZE - 1)));
    120
    121	return (void *)ret;
    122}
    123#endif