cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

mmu_context.h (3624B)


      1/*
      2 * Switch an MMU context.
      3 *
      4 * This file is subject to the terms and conditions of the GNU General Public
      5 * License.  See the file "COPYING" in the main directory of this archive
      6 * for more details.
      7 *
      8 * Copyright (C) 2001 - 2013 Tensilica Inc.
      9 */
     10
     11#ifndef _XTENSA_MMU_CONTEXT_H
     12#define _XTENSA_MMU_CONTEXT_H
     13
     14#ifndef CONFIG_MMU
     15#include <asm/nommu_context.h>
     16#else
     17
     18#include <linux/stringify.h>
     19#include <linux/sched.h>
     20#include <linux/mm_types.h>
     21#include <linux/pgtable.h>
     22
     23#include <asm/vectors.h>
     24
     25#include <asm/cacheflush.h>
     26#include <asm/tlbflush.h>
     27#include <asm-generic/mm_hooks.h>
     28#include <asm-generic/percpu.h>
     29
     30#if (XCHAL_HAVE_TLBS != 1)
     31# error "Linux must have an MMU!"
     32#endif
     33
     34DECLARE_PER_CPU(unsigned long, asid_cache);
     35#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
     36
     37/*
     38 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
     39 * any user or kernel context.  We use the reserved values in the
     40 * ASID_INSERT macro below.
     41 *
     42 * 0 invalid
     43 * 1 kernel
     44 * 2 reserved
     45 * 3 reserved
     46 * 4...255 available
     47 */
     48
     49#define NO_CONTEXT	0
     50#define ASID_USER_FIRST	4
     51#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
     52#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))
     53
     54void init_mmu(void);
     55void init_kio(void);
     56
/*
 * Write 'val' into the RASID special register.
 *
 * RASID holds one ASID byte per protection ring; callers build 'val'
 * with ASID_INSERT() so the reserved ring ASIDs (0x03, 0x02, 0x00, 0x01
 * pattern) stay fixed and only the user ASID byte varies.  The isync
 * makes the new ASID assignment visible before any following
 * instruction fetch or memory access.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, rasid\n\t"
			      " isync\n" : : "a" (val));
}
     62
/*
 * Read back the current contents of the RASID special register.
 * Returns the raw register value (all four ring ASID bytes packed,
 * as written by set_rasid_register()).
 */
static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
	return tmp;
}
     69
/*
 * Allocate a fresh ASID for 'mm' on 'cpu' from the per-cpu asid_cache
 * counter.
 *
 * The counter's low XCHAL_MMU_ASID_BITS are the ASID proper; the bits
 * above ASID_MASK act as a generation ("incarnation") number.  When the
 * low bits wrap to 0 we have exhausted the ASID space for this
 * generation: flush the whole local TLB and restart the low bits at
 * ASID_USER_FIRST, skipping the reserved values 0..3 (see the table
 * above NO_CONTEXT).
 */
static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = cpu_asid_cache(cpu);
	if ((++asid & ASID_MASK) == 0) {
		/*
		 * Start new asid cycle; continue counting with next
		 * incarnation bits; skipping over 0, 1, 2, 3.
		 */
		local_flush_tlb_all();
		asid += ASID_USER_FIRST;
	}
	/* Publish the new counter, then record the assignment in the mm. */
	cpu_asid_cache(cpu) = asid;
	mm->context.asid[cpu] = asid;
	mm->context.cpu = cpu;
}
     85
     86static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
     87{
     88	/*
     89	 * Check if our ASID is of an older version and thus invalid.
     90	 */
     91
     92	if (mm) {
     93		unsigned long asid = mm->context.asid[cpu];
     94
     95		if (asid == NO_CONTEXT ||
     96				((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
     97			get_new_mmu_context(mm, cpu);
     98	}
     99}
    100
/*
 * Make 'mm' the active address space on 'cpu'.
 *
 * Order matters: a valid ASID must exist (get_mmu_context) before it is
 * programmed into RASID; the page-directory invalidation then discards
 * the cached translation state for the previous context.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
	invalidate_page_directory();
}
    107
    108/*
    109 * Initialize the context related info for a new mm_struct
    110 * instance.  Valid cpu values are 0..(NR_CPUS-1), so initializing
    111 * to -1 says the process has never run on any core.
    112 */
    113
    114#define init_new_context init_new_context
    115static inline int init_new_context(struct task_struct *tsk,
    116		struct mm_struct *mm)
    117{
    118	int cpu;
    119	for_each_possible_cpu(cpu) {
    120		mm->context.asid[cpu] = NO_CONTEXT;
    121	}
    122	mm->context.cpu = -1;
    123	return 0;
    124}
    125
/*
 * Switch the address space from 'prev' to 'next' on the current CPU.
 *
 * If 'next' last ran on a different core, the instruction cache is
 * invalidated (icache coherency is not maintained across cores here)
 * and next->context.cpu is updated.  The context is (re)activated when
 * we migrated or when the mm actually changes; a same-cpu switch to the
 * same mm is a no-op.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	int migrated = next->context.cpu != cpu;
	/* Flush the icache if we migrated to a new core. */
	if (migrated) {
		__invalidate_icache_all();
		next->context.cpu = cpu;
	}
	if (migrated || prev != next)
		activate_context(next, cpu);
}
    139
    140/*
    141 * Destroy context related info for an mm_struct that is about
    142 * to be put to rest.
    143 */
#define destroy_context destroy_context
/*
 * Tear down context state for an mm that is being destroyed.  The ASID
 * slots need no explicit release (generations age them out); only the
 * cached page-directory translation state is invalidated.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
    149
    150
    151#include <asm-generic/mmu_context.h>
    152
    153#endif /* CONFIG_MMU */
    154#endif /* _XTENSA_MMU_CONTEXT_H */