cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmu.h (2504B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/cputype.h>

#define MMCF_AARCH32	0x1	/* mm context flag for AArch32 executables */
#define USER_ASID_BIT	48
#define USER_ASID_FLAG	(UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK	(UL(0xffff) << 48)

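/*
 * Illustrative sketch, not part of the upstream header: the macros above
 * place the 16-bit ASID in bits 63:48 of a TTBR register image, so a
 * hypothetical helper folding an ASID into a TTBR value would read:
 *
 *	ttbr = (ttbr & ~TTBR_ASID_MASK) | (asid << USER_ASID_BIT);
 */
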
#ifndef __ASSEMBLY__

#include <linux/refcount.h>
#include <asm/cpufeature.h>

typedef struct {
	atomic64_t	id;
#ifdef CONFIG_COMPAT
	void		*sigpage;
#endif
	refcount_t	pinned;
	void		*vdso;
	unsigned long	flags;
} mm_context_t;

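/*
 * Illustration, assuming the layout implied by the ASID() macro below:
 * context.id packs an allocator generation count in its upper bits and
 * the 16-bit hardware ASID in its lower bits, so readers mask it:
 *
 *	u64 id = atomic64_read(&mm->context.id);
 *	u16 hw_asid = id & 0xffff;	// what ASID(mm) extracts
 */
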
     30/*
     31 * We use atomic64_read() here because the ASID for an 'mm_struct' can
     32 * be reallocated when scheduling one of its threads following a
     33 * rollover event (see new_context() and flush_context()). In this case,
     34 * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
     35 * may use a stale ASID. This is fine in principle as the new ASID is
     36 * guaranteed to be clean in the TLB, but the TLBI routines have to take
     37 * care to handle the following race:
     38 *
     39 *    CPU 0                    CPU 1                          CPU 2
     40 *
     41 *    // ptep_clear_flush(mm)
     42 *    xchg_relaxed(pte, 0)
     43 *    DSB ISHST
     44 *    old = ASID(mm)
     45 *         |                                                  <rollover>
     46 *         |                   new = new_context(mm)
     47 *         \-----------------> atomic_set(mm->context.id, new)
     48 *                             cpu_switch_mm(mm)
     49 *                             // Hardware walk of pte using new ASID
     50 *    TLBI(old)
     51 *
     52 * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
     53 * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
     54 * written by CPU 0.
     55 */
     56#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
     57
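/*
 * Usage sketch, not taken from this header: a TLB-invalidation path
 * takes a single racy snapshot of the ASID, as in the diagram above:
 *
 *	unsigned long asid = ASID(mm);	// may be stale after a rollover
 *	// DSB, then TLBI by this ASID; safe because a freshly
 *	// allocated ASID is guaranteed to be clean in the TLB.
 */
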
static inline bool arm64_kernel_unmapped_at_el0(void)
{
	return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

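/*
 * Illustrative helper, an assumption rather than upstream API: when KPTI
 * is active, user ASIDs are tagged with USER_ASID_FLAG so user and
 * kernel TLB entries for the same context stay distinct, mirroring the
 * check the TLBI macros perform before invalidating by user ASID:
 */
static inline unsigned long example_user_asid(unsigned long asid)
{
	/* Tag the ASID only when the kernel is unmapped at EL0 (KPTI). */
	if (arm64_kernel_unmapped_at_el0())
		asid |= USER_ASID_FLAG;
	return asid;
}
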
extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
extern bool kaslr_requires_kpti(void);

#define INIT_MM_CONTEXT(name)	\
	.pgd = init_pg_dir,

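/*
 * Usage note (an assumption about how this macro is consumed): it sits
 * at the end of the init_mm initializer, e.g.:
 *
 *	struct mm_struct init_mm = {
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 *
 * so this later designated initializer overrides the generic .pgd
 * default with init_pg_dir.
 */
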
#endif	/* !__ASSEMBLY__ */
#endif