cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux

tlbflush_64.h (1776B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */

#define TLB_BATCH_NR	192

struct tlb_batch {
	unsigned int hugepage_shift;
	struct mm_struct *mm;
	unsigned long tlb_nr;
	unsigned long active;
	unsigned long vaddrs[TLB_BATCH_NR];
};

void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 unsigned int hugepage_shift);

/* TLB flush operations. */

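/*
 * Note: the mm/page/range flush hooks below are intentionally empty on
 * sparc64.  User TLB flushes are accumulated in a per-cpu struct tlb_batch
 * and issued later by flush_tlb_pending() (see arch/sparc/mm/tlb.c),
 * typically when leaving lazy MMU mode.
 */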
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end);

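/*
 * Lazy MMU mode: while a batch is active, user TLB flushes are queued in
 * the per-cpu struct tlb_batch; arch_leave_lazy_mmu_mode() drains any
 * queued entries via flush_tlb_pending().
 */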
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode()      do {} while (0)

/* Local cpu only.  */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */