cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

string_64.h (2852B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Even with __builtin_, the compiler may decide to use the out-of-line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we should use
 * the non-instrumented versions of the mem* functions.
 */
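/*
 * (A file opts out of instrumentation with "KASAN_SANITIZE_foo.o := n"
 * in its Makefile; such objects are compiled without
 * __SANITIZE_ADDRESS__, so the redirects below apply to them.)
 */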

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
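
/*
 * For small constant sizes, memcpy_flushcache() inlines "movnti"
 * non-temporal stores, which move data toward memory without leaving
 * dirty lines in the CPU caches (the point of the _flushcache variant,
 * used e.g. for persistent memory). All other sizes fall back to the
 * out-of-line __memcpy_flushcache().
 */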
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
			case 4:
				asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
				return;
			case 8:
				asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
				return;
			case 16:
				asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
				asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
				return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */
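
Usage sketch (hypothetical caller, not part of this file): the snippet below
assumes a kernel build on x86-64 with CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE set;
the names fill_and_persist, buf, pmem_dst and rec are illustrative only.

	#include <linux/string.h>	/* pulls in asm/string_64.h on x86-64 */

	static void fill_and_persist(u32 *buf, void *pmem_dst, const u64 *rec)
	{
		/* rep stosl: store 0x00ff00ff into 640 consecutive u32 slots */
		memset32(buf, 0x00ff00ff, 640);

		/* cnt == 16 is a compile-time constant, so this becomes two
		   movntiq stores instead of an out-of-line call */
		memcpy_flushcache(pmem_dst, rec, 16);
	}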