cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mmu_context.h (9271B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
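
/*
 * Illustrative sketch, NOT part of the original header: how an in-kernel
 * user of the preregistration API above might pin a range of userspace
 * memory and translate its first page to a host physical address. The
 * helper name and the single-page translation are assumptions for
 * illustration only; real callers live in the VFIO/KVM SPAPR TCE code.
 */
#ifdef CONFIG_SPAPR_TCE_IOMMU
static inline long example_iommu_preregister(struct mm_struct *mm,
					     unsigned long ua,
					     unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long hpa;
	long ret;

	/* Pin 'entries' pages of userspace memory starting at 'ua'. */
	ret = mm_iommu_new(mm, ua, entries, &mem);
	if (ret)
		return ret;

	/* Resolve the first page to its host physical address. */
	ret = mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa);

	/* Drop the reference taken by mm_iommu_new(). */
	mm_iommu_put(mm, mem);
	return ret;
}
#endif
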
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
#endif

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-book3s_64 platforms should never call this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif
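
/*
 * Illustrative sketch, NOT part of the original header: the pattern a
 * hash-MMU fault path uses with the two helpers above, lazily allocating
 * an extra context id when an effective address falls outside the range
 * covered by the existing ones. The helper name and -EFAULT mapping are
 * assumptions for illustration only.
 */
#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int example_ensure_context(struct mm_struct *mm,
					 unsigned long ea)
{
	if (need_extra_context(mm, ea)) {
		/* No context id covers 'ea' yet; allocate one. */
		if (alloc_extended_context(mm, ea) < 0)
			return -EFAULT;
	}
	return 0;
}
#endif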

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global so as
	 * to propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}
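
/*
 * Illustrative sketch, NOT part of the original header: a hypothetical
 * coprocessor driver bracketing its use of an address space with the two
 * helpers above, so TLB invalidations stay global while the nest MMU may
 * hold translations for this mm.
 */
static inline void example_copro_use_mm(struct mm_struct *mm)
{
	mm_context_add_copro(mm);	/* TLBIs now broadcast globally */

	/* ... program the coprocessor with this mm's translations ... */

	mm_context_remove_copro(mm);	/* flushes, then allows local TLBIs */
}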

/*
 * The vas_windows counter shows the number of open windows in the mm
 * context. During context switch, use this counter to clear the
 * foreign real address mapping (CP_ABORT) for the thread / process
 * that intends to use COPY/PASTE. When a process closes all windows,
 * disable CP_ABORT, which is expensive to run.
 *
 * For user context, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for user
 * space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
	atomic_inc(&mm->context.vas_windows);
	mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
	int v;

	mm_context_remove_copro(mm);
	v = atomic_dec_if_positive(&mm->context.vas_windows);

	/* Detect imbalance between add and remove */
	WARN_ON(v < 0);
}
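
/*
 * Illustrative sketch, NOT part of the original header: how a VAS driver
 * might pair the window counters around the lifetime of a user space
 * window, so the context-switch code knows when CP_ABORT is required.
 * The helper name is hypothetical.
 */
static inline void example_vas_window_lifetime(struct mm_struct *mm)
{
	mm_context_add_vas_window(mm);		/* on window open */

	/* ... window is used for COPY/PASTE ... */

	mm_context_remove_vas_window(mm);	/* on window close */
}
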
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
#else
static inline void do_h_rpt_invalidate_prt(unsigned long pid,
					   unsigned long lpid,
					   unsigned long type,
					   unsigned long pg_sizes,
					   unsigned long start,
					   unsigned long end) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}
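
/*
 * Illustrative sketch, NOT part of the original header: the exec-time
 * pattern activate_mm() exists for, per the comment above. Heavily
 * simplified from what generic code such as exec_mmap() does; locking
 * and mm refcounting are omitted, and the helper name is hypothetical.
 */
static inline void example_activate_new_mm(struct mm_struct *new_mm)
{
	struct mm_struct *old_mm = current->mm;

	current->mm = new_mm;		/* install the new address space */
	activate_mm(old_mm, new_mm);	/* switch hardware context to it */
}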

/* We don't currently use enter_lazy_tlb() for anything */
#ifdef CONFIG_PPC_BOOK3E_64
#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = NULL;
}
#endif

extern void arch_exit_mmap(struct mm_struct *mm);

static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long vdso_base = (unsigned long)mm->context.vdso;

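	/* If the unmapped range covers the vDSO, forget its cached base. */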
	if (start <= vdso_base && vdso_base < end)
		mm->context.vdso = NULL;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#include <asm-generic/mmu_context.h>

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */