cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmu_context_64.h (5481B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/sched.h>

#include <asm/spitfire.h>
#include <asm/adi_64.h>
#include <asm-generic/mm_hooks.h>
#include <asm/percpu.h>

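/* MMU context-ID allocation state: ctx_alloc_lock protects
 * tlb_context_cache (the most recently handed-out context number) and
 * mmu_context_bmap (a bitmap of context IDs currently in use).
 * per_cpu_secondary_mm records which mm each CPU last loaded into its
 * secondary context register, and get_new_mmu_context() assigns a
 * fresh hardware context to an mm whose current context is invalid.
 */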
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);

#define init_new_context init_new_context
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
void destroy_context(struct mm_struct *mm);

void __tsb_context_switch(unsigned long pgd_pa,
			  struct tsb_config *tsb_base,
			  struct tsb_config *tsb_huge,
			  unsigned long tsb_descr_pa,
			  unsigned long secondary_ctx);

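/* Load this mm's page table base and TSB (Translation Storage Buffer)
 * configuration into the MMU via the __tsb_context_switch() assembly
 * helper.  The huge-page TSB is only passed along when such a TSB has
 * actually been allocated for the mm.
 */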
static inline void tsb_context_switch_ctx(struct mm_struct *mm,
					  unsigned long ctx)
{
	__tsb_context_switch(__pa(mm->pgd),
			     &mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
			     (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
			      &mm->context.tsb_block[MM_TSB_HUGE] :
			      NULL)
#else
			     NULL
#endif
			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
			     ctx);
}

#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)

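/* tsb_grow() resizes an mm's TSB once its resident set outgrows the
 * current table; smp_tsb_sync() then cross-calls the other CPUs using
 * that mm so they pick up the new TSB (a no-op on !SMP).
 */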
void tsb_grow(struct mm_struct *mm,
	      unsigned long tsb_index,
	      unsigned long mm_rss);
#ifdef CONFIG_SMP
void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware. */
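/* The secondary context register is reached through ASI_DMMU on sun4u
 * and through ASI_MMU on sun4v.  The .sun4v_1insn_patch section below
 * lets early boot patch the single stxa instruction when running under
 * the sun4v hypervisor, so one kernel image serves both.
 */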
#define load_secondary_context(__mm) \
	__asm__ __volatile__( \
	"\n661:	stxa		%0, [%1] %2\n" \
	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
	"	.word		661b\n" \
	"	stxa		%0, [%1] %3\n" \
	"	.previous\n" \
	"	flush		%%g6\n" \
	: /* No outputs */ \
	: "r" (CTX_HWBITS((__mm)->context)), \
	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid, flags;
	int cpu = smp_processor_id();

	per_cpu(per_cpu_secondary_mm, cpu) = mm;
	if (unlikely(mm == &init_mm))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* We have to be extremely careful here or else we will miss
	 * a TSB grow if we switch back and forth between a kernel
	 * thread and an address space which has its TSB size increased
	 * on another processor.
	 *
	 * It is possible to play some games in order to optimize the
	 * switch, but the safest thing to do is to unconditionally
	 * perform the secondary context load and the TSB context switch.
	 *
	 * For reference the bad case is, for address space "A":
	 *
	 *		CPU 0			CPU 1
	 *	run address space A
	 *	set cpu0's bits in cpu_vm_mask
	 *	switch to kernel thread, borrow
	 *	address space A via entry_lazy_tlb
	 *					run address space A
	 *					set cpu1's bit in cpu_vm_mask
	 *					flush_tlb_pending()
	 *					reset cpu_vm_mask to just cpu1
	 *					TSB grow
	 *	run address space A
	 *	context was valid, so skip
	 *	TSB context switch
	 *
	 * At that point cpu0 continues to use a stale TSB, the one from
	 * before the TSB grow performed on cpu1.  cpu1 did not cross-call
	 * cpu0 to update its TSB because at that point the cpu_vm_mask
	 * only had cpu1 set in it.
	 */
	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));

	/* Any time a processor runs a context on an address space
	 * for the first time, we must flush that context out of the
	 * local TLB.
	 */
	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		cpumask_set_cpu(cpu, mm_cpumask(mm));
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)

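/* On ADI (Application Data Integrity) capable processors the per-thread
 * MCDPER state must follow the task across a context switch: it is read
 * and stashed in TIF_MCDPER here, before the switch, and written back in
 * finish_arch_post_lock_switch() once the new task is running.
 */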
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	/* Save the current state of MCDPER register for the process
	 * we are switching from.
	 */
	if (adi_capable()) {
		register unsigned long tmp_mcdper;

		__asm__ __volatile__(
			".word 0x83438000\n\t"	/* rd  %mcdper, %g1 */
			"mov %%g1, %0\n\t"
			: "=r" (tmp_mcdper)
			:
			: "g1");
		if (tmp_mcdper)
			set_tsk_thread_flag(prev, TIF_MCDPER);
		else
			clear_tsk_thread_flag(prev, TIF_MCDPER);
	}
}

#define finish_arch_post_lock_switch	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	/* Restore the state of MCDPER register for the new process
	 * just switched to.
	 */
	if (adi_capable()) {
		register unsigned long tmp_mcdper;

		tmp_mcdper = test_thread_flag(TIF_MCDPER);
		__asm__ __volatile__(
			"mov %0, %%g1\n\t"
			".word 0x9d800001\n\t"	/* wr %g0, %g1, %mcdper */
			".word 0xaf902001\n\t"	/* wrpr %g0, 1, %pmcdper */
			:
			: "ir" (tmp_mcdper)
			: "g1");
		if (current && current->mm && current->mm->context.adi) {
			struct pt_regs *regs;

			regs = task_pt_regs(current);
			regs->tstate |= TSTATE_MCDE;
		}
	}
}

#include <asm-generic/mmu_context.h>

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */