cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

arch/arm/include/asm/mmu_context.h (3848B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
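/*
 * init_mm.context.vmalloc_seq is bumped whenever new top-level vmalloc
 * mappings are created; __check_vmalloc_seq() copies the kernel's vmalloc
 * page table entries into this mm once its sequence number goes stale.
 * Only needed on non-LPAE, where each mm carries its own copy of the
 * kernel mappings.
 */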
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

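/*
 * With hardware ASIDs, ASID allocation/rollover and the actual page table
 * switch are handled out of line in arch/arm/mm/context.c.
 */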
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

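/*
 * A new mm starts out without an ASID; check_and_switch_context() assigns
 * one the first time the mm is switched in.
 */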
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
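/*
 * Called from the finish_task_switch() path once the runqueue lock has
 * been dropped and interrupts are enabled again, so a switch deferred in
 * check_and_switch_context() can run cpu_switch_mm() safely (see the
 * sketch after this file).
 */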
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */

#endif	/* CONFIG_MMU */

#endif	/* CONFIG_CPU_HAS_ASID */

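/* Activating a fresh mm (e.g. from exec_mmap()) is just a switch with no task. */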
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

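/*
 * With vmap'ed stacks, the task we are about to run may have its kernel
 * stack in the vmalloc area, so the mm borrowed in lazy-TLB mode must
 * already carry up-to-date vmalloc mappings: a missing stack mapping
 * cannot be faulted in lazily.
 */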
#ifdef CONFIG_VMAP_STACK
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (mm != &init_mm)
		check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

#include <asm-generic/mmu_context.h>

#endif
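
The sketch below is for orientation only and is not part of this file or of
this repository: it shows, in heavily simplified form, the order in which the
generic scheduler (context_switch()/finish_task_switch() in kernel/sched/core.c)
is expected to exercise the hooks above. The function name
context_switch_sketch() is invented for illustration, and real details such as
switch_mm_irqs_off(), membarrier handling and runqueue locking are omitted.

#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/mmu_context.h>

static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	if (!next->mm)
		/* Kernel thread: keep borrowing prev's mm in lazy-TLB mode. */
		enter_lazy_tlb(prev->active_mm, next);
	else
		/* User task: may defer cpu_switch_mm() if IRQs are off. */
		switch_mm(prev->active_mm, next->mm, next);

	/* ... low-level register/stack switch happens here ... */

	/*
	 * Once the runqueue lock is dropped with interrupts enabled, the
	 * finish_task_switch() path calls finish_arch_post_lock_switch(),
	 * which performs any deferred cpu_switch_mm().
	 */
	finish_arch_post_lock_switch();
}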