cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmu_context.h (7207B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __M68K_MMU_CONTEXT_H
#define __M68K_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#if defined(CONFIG_COLDFIRE)

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mcfmmu.h>
#include <asm/mmu.h>

#define NO_CONTEXT		256
#define LAST_CONTEXT		255
#define FIRST_CONTEXT		1
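/*
 * ColdFire ASIDs are 8 bits wide, so usable contexts run from
 * FIRST_CONTEXT (1) to LAST_CONTEXT (255); NO_CONTEXT (256) lies
 * outside that range and marks an mm with no ASID assigned yet.
 */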

extern unsigned long context_map[];
extern mm_context_t next_mmu_context;

extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);

static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)
		return;
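	/*
	 * Reserve a slot: if the free-context counter would drop below
	 * zero there is nothing left, so undo the decrement, steal a
	 * context from another mm and retry.
	 */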
	while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
		atomic_inc(&nr_free_contexts);
		steal_context();
	}
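	/* Round-robin scan of the context bitmap for a free ASID,
	 * wrapping at LAST_CONTEXT. */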
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context = ctx;
	context_mm[ctx] = mm;
}

/*
 * Set up the context for a new address space.
 */
#define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		clear_bit(mm->context, context_map);
		mm->context = NO_CONTEXT;
		atomic_inc(&nr_free_contexts);
	}
}

static inline void set_context(mm_context_t context, pgd_t *pgd)
{
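	/*
	 * Load the new ASID into the MMU's ASID register. The pgd is
	 * unused here: the ColdFire TLB is software-loaded, so actual
	 * translations are installed on demand (see load_ksp_mmu()).
	 */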
	__asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *tsk)
{
	get_mmu_context(tsk->mm);
	set_context(tsk->mm->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *active_mm,
	struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_context(mm->context, mm->pgd);
}

#define prepare_arch_switch(next) load_ksp_mmu(next)

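/*
 * Called via prepare_arch_switch() before a task switch: make sure the
 * next task's kernel stack already has a valid TLB entry, since the
 * kernel cannot service a TLB miss on the very stack it is about to
 * switch to.
 */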
static inline void load_ksp_mmu(struct task_struct *task)
{
	unsigned long flags;
	struct mm_struct *mm;
	int asid;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long mmuar;

	local_irq_save(flags);
	mmuar = task->thread.ksp;

	/* Search for a valid TLB entry; if one is found, don't remap */
	mmu_write(MMUAR, mmuar);
	mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR);
	if (mmu_read(MMUSR) & MMUSR_HIT)
		goto end;

	if (mmuar >= PAGE_OFFSET) {
		mm = &init_mm;
	} else {
		pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
		mm = task->mm;
	}

	if (!mm)
		goto bug;

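	/* Walk the page tables by hand (pgd -> p4d -> pud -> pmd -> pte)
	 * to find the translation for the stack address. */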
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))
		goto bug;

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d))
		goto bug;

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud))
		goto bug;

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd))
		goto bug;

	pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
				     : pte_offset_map(pmd, mmuar);
	if (pte_none(*pte) || !pte_present(*pte))
		goto bug;

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
		set_pte(pte, pte_wrprotect(*pte));

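	/*
	 * Hand-build the TLB entry: MMUTR takes the virtual side (page
	 * address, ASID and valid bit), MMUDR the physical side (page
	 * frame and protection bits), and the MMUOR write commits the
	 * pair into the TLB.
	 */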
	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);

	goto end;

bug:
	pr_info("ksp load failed: mm=0x%p ksp=0x%08lx\n", mm, mmuar);
end:
	local_irq_restore(flags);
}

#elif defined(CONFIG_SUN3)
#include <asm/sun3mmu.h>
#include <linux/sched.h>

extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);
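/*
 * The Sun-3 MMU provides a small fixed number of hardware contexts;
 * get_free_context()/clear_context() hand them out and reclaim them.
 */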

/* set the context for a new task to unmapped */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = SUN3_INVALID_CONTEXT;
	return 0;
}

/* find the context given to this process, and if it hasn't already
   got one, go get one for it. */
static inline void get_mmu_context(struct mm_struct *mm)
{
	if (mm->context == SUN3_INVALID_CONTEXT)
		mm->context = get_free_context(mm);
}

/* flush context if allocated... */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != SUN3_INVALID_CONTEXT)
		clear_context(mm->context);
}

static inline void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	sun3_put_context(mm->context);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	activate_context(tsk->mm);
}

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	activate_context(next_mm);
}

#else

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

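/*
 * Classic 680x0 MMUs (68030/68040/68060 and the 68851) have no ASIDs:
 * the "context" is simply the physical address of the task's page
 * directory, loaded into the MMU root pointer register on each switch.
 */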
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = virt_to_phys(mm->pgd);
	return 0;
}

static inline void switch_mm_0230(struct mm_struct *mm)
{
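	/* Build the 68030 CRP (CPU root pointer) descriptor: the first
	 * long carries the limit/descriptor-type bits, the second the
	 * physical address of the root table. */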
	unsigned long crp[2] = {
		0x80000000 | _PAGE_TABLE, mm->context
	};
	unsigned long tmp;

	asm volatile (".chip 68030");

	/* flush MC68030/MC68020 caches (they are virtually addressed) */
	asm volatile (
		"movec %%cacr,%0;"
		"orw %1,%0; "
		"movec %0,%%cacr"
		: "=d" (tmp) : "di" (FLUSH_I_AND_D));

	/* Switch the root pointer. For a 030-only kernel,
	 * avoid flushing the whole ATC; we only need to
	 * flush the user entries. The 68851 does this by
	 * itself. Avoid a runtime check here.
	 */
	asm volatile (
#ifdef CPU_M68030_ONLY
		"pmovefd %0,%%crp; "
		"pflush #0,#4"
#else
		"pmove %0,%%crp"
#endif
		: : "m" (crp[0]));

	asm volatile (".chip 68k");
}

static inline void switch_mm_0460(struct mm_struct *mm)
{
	asm volatile (".chip 68040");

	/* flush address translation cache (user entries) */
	asm volatile ("pflushan");

	/* switch the root pointer */
	asm volatile ("movec %0,%%urp" : : "r" (mm->context));

	if (CPU_IS_060) {
		unsigned long tmp;

		/* clear user entries in the branch cache */
		asm volatile (
			"movec %%cacr,%0; "
			"orl %1,%0; "
			"movec %0,%%cacr"
			: "=d" (tmp) : "di" (0x00200000));
	}

	asm volatile (".chip 68k");
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next) {
		if (CPU_IS_020_OR_030)
			switch_mm_0230(next);
		else
			switch_mm_0460(next);
	}
}

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	next_mm->context = virt_to_phys(next_mm->pgd);

	if (CPU_IS_020_OR_030)
		switch_mm_0230(next_mm);
	else
		switch_mm_0460(next_mm);
}

#endif

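/*
 * asm-generic/mmu_context.h supplies no-op defaults for every hook not
 * defined above; the "#define foo foo" lines mark which hooks each MMU
 * variant overrides.
 */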
#include <asm-generic/mmu_context.h>

#else /* !CONFIG_MMU */

#include <asm-generic/nommu_context.h>

#endif /* CONFIG_MMU */
#endif /* __M68K_MMU_CONTEXT_H */