cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mte-kasan.h (3755B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H

#include <asm/compiler.h>
#include <asm/cputype.h>
#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/types.h>

#ifdef CONFIG_ARM64_MTE

/*
 * These functions are meant to be only used from KASAN runtime through
 * the arch_*() interface defined in asm/memory.h.
 * These functions don't include system_supports_mte() checks,
 * as KASAN only calls them when MTE is supported and enabled.
 */
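
/*
 * For orientation: asm/memory.h is expected to map the KASAN hooks onto
 * these helpers roughly as arch_get_mem_tag() -> mte_get_mem_tag(),
 * arch_get_random_tag() -> mte_get_random_tag() and
 * arch_set_mem_tag_range() -> mte_set_mem_tag_range(). The exact macro
 * names are an assumption here and should be checked against asm/memory.h
 * in this tree.
 */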

static inline u8 mte_get_ptr_tag(void *ptr)
{
	/* Note: The format of KASAN tags is 0xF<x> */
	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);

	return tag;
}
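
/*
 * Worked example for mte_get_ptr_tag(), assuming MTE_TAG_SHIFT == 56 as in
 * asm/mte-def.h: for ptr == 0x0b00ffff12345678 the top byte is 0x0b, so the
 * result is 0xF0 | 0x0b == 0xfb - the 4-bit hardware tag lands in the low
 * nibble and the high nibble is forced to 0xF, matching the 0xF<x> KASAN
 * tag format noted above.
 */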

/* Get allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
		: "+r" (addr));

	return mte_get_ptr_tag(addr);
}

/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
	void *addr;

	asm(__MTE_PREAMBLE "irg %0, %0"
		: "=r" (addr));

	return mte_get_ptr_tag(addr);
}

static inline u64 __stg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline u64 __stzg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline void __dc_gva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}

static inline void __dc_gzva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}

/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be MTE_GRANULE_SIZE aligned.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
	u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;

	/* Read DC G(Z)VA block size from the system register. */
	dczid = read_cpuid(DCZID_EL0);
	dczid_bs = 4ul << (dczid & 0xf);
	dczid_dzp = (dczid >> 4) & 1;
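	/*
	 * Example with an assumed typical value: DCZID_EL0.BS == 4 gives
	 * dczid_bs == 4 << 4 == 64 bytes per DC (G)ZVA block; dczid_dzp is
	 * the DZP bit, and when it is set the DC-based path below is skipped
	 * so the whole range is tagged with STG/STZG alone.
	 */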

	curr = (u64)__tag_set(addr, tag);
	mask = dczid_bs - 1;
	/* STG/STZG up to the end of the first block. */
	end1 = curr | mask;
	end3 = curr + size;
	/* DC GVA / GZVA in [end1, end2) */
	end2 = end3 & ~mask;

	/*
	 * The following code uses STG on the first DC GVA block even if the
	 * start address is aligned - it appears to be faster than an alignment
	 * check + conditional branch. Also, if the range size is at least 2 DC
	 * GVA blocks, the first two loops can use post-condition to save one
	 * branch each.
	 */
#define SET_MEMTAG_RANGE(stg_post, dc_gva)		\
	do {						\
		if (!dczid_dzp && size >= 2 * dczid_bs) {\
			do {				\
				curr = stg_post(curr);	\
			} while (curr < end1);		\
							\
			do {				\
				dc_gva(curr);		\
				curr += dczid_bs;	\
			} while (curr < end2);		\
		}					\
							\
		while (curr < end3)			\
			curr = stg_post(curr);		\
	} while (0)

	if (init)
		SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
	else
		SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}
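
/*
 * Worked example with assumed values (64-byte DC block, MTE_GRANULE_SIZE
 * == 16): for a tagged start address ending in 0x1020 and size == 256,
 * mask == 63, end1 ends in 0x103f, end2 in 0x1100 and end3 in 0x1120.
 * The first loop STGs the granules at 0x1020 and 0x1030, the DC GVA loop
 * covers the blocks at 0x1040, 0x1080 and 0x10c0, and the final loop STGs
 * 0x1100 and 0x1110 - together exactly the requested 256 bytes.
 */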

void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);

#else /* CONFIG_ARM64_MTE */

static inline u8 mte_get_ptr_tag(void *ptr)
{
	return 0xFF;
}

static inline u8 mte_get_mem_tag(void *addr)
{
	return 0xFF;
}

static inline u8 mte_get_random_tag(void)
{
	return 0xFF;
}

static inline void mte_set_mem_tag_range(void *addr, size_t size,
						u8 tag, bool init)
{
}

static inline void mte_enable_kernel_sync(void)
{
}

static inline void mte_enable_kernel_async(void)
{
}

static inline void mte_enable_kernel_asymm(void)
{
}

#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */

#endif /* __ASM_MTE_KASAN_H  */
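
The loop-bound arithmetic in SET_MEMTAG_RANGE() is easier to check with
concrete numbers. The sketch below is not part of the repository: it is a
minimal userspace model in which the block size and start address are
assumptions and STG/STZG and DC GVA/GZVA are replaced by byte accounting,
just to show that the three loops together cover exactly [curr, curr + size).

/* Userspace model of the SET_MEMTAG_RANGE() bounds - illustrative only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GRANULE	16	/* MTE_GRANULE_SIZE */

static uint64_t covered;

/* Stand-in for __stg_post()/__stzg_post(): one granule, post-incremented. */
static uint64_t stg_post(uint64_t p)
{
	covered += GRANULE;
	return p + GRANULE;
}

/* Stand-in for __dc_gva()/__dc_gzva(): one whole DC block. */
static void dc_gva(uint64_t p, uint64_t block)
{
	(void)p;
	covered += block;
}

int main(void)
{
	uint64_t dczid_bs = 64;			/* assumed DC (G)ZVA block size */
	int dczid_dzp = 0;			/* assume DC ops are permitted */
	uint64_t curr = 0x1020, size = 256;	/* granule-aligned, as required */
	uint64_t mask = dczid_bs - 1;
	uint64_t end1 = curr | mask;
	uint64_t end3 = curr + size;
	uint64_t end2 = end3 & ~mask;

	/* Same structure as SET_MEMTAG_RANGE() in the header above. */
	if (!dczid_dzp && size >= 2 * dczid_bs) {
		do {
			curr = stg_post(curr);
		} while (curr < end1);

		do {
			dc_gva(curr, dczid_bs);
			curr += dczid_bs;
		} while (curr < end2);
	}

	while (curr < end3)
		curr = stg_post(curr);

	printf("covered %llu of %llu bytes\n",
	       (unsigned long long)covered, (unsigned long long)size);
	assert(covered == size);
	return 0;
}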