cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tlbflush.c (2052B)


// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

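/* Flush all TLB entries belonging to the given ASID on the local hart. */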
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma x0, %0"
			:
			: "r" (asid)
			: "memory");
}

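/* Flush a single virtual address for the given ASID on the local hart. */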
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	__asm__ __volatile__ ("sfence.vma %0, %1"
			:
			: "r" (addr), "r" (asid)
			: "memory");
}

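/* Flush the entire TLB on all harts via the SBI. */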
void flush_tlb_all(void)
{
	sbi_remote_sfence_vma(NULL, 0, -1);
}

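/*
 * Flush the virtual address range [start, start + size) for an mm.
 * When the mm has only run on the current CPU, a local sfence.vma is
 * enough: a single page if the range fits in one stride, otherwise a
 * full (per-ASID) flush.  When other CPUs may hold stale entries, the
 * flush is broadcast to them through the SBI.
 */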
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
				  unsigned long size, unsigned long stride)
{
	struct cpumask *cmask = mm_cpumask(mm);
	unsigned int cpuid;
	bool broadcast;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();
	/* check if the tlbflush needs to be sent to other CPUs */
	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
	if (static_branch_unlikely(&use_asid_allocator)) {
		unsigned long asid = atomic_long_read(&mm->context.id);

		if (broadcast) {
			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
		} else if (size <= stride) {
			local_flush_tlb_page_asid(start, asid);
		} else {
			local_flush_tlb_all_asid(asid);
		}
	} else {
		if (broadcast) {
			sbi_remote_sfence_vma(cmask, start, size);
		} else if (size <= stride) {
			local_flush_tlb_page(start);
		} else {
			local_flush_tlb_all();
		}
	}

	put_cpu();
}

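/* Flush the entire address space of an mm on every CPU that has used it. */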
void flush_tlb_mm(struct mm_struct *mm)
{
	__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
}

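/* Flush the mapping of a single page. */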
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

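/* Flush an arbitrary range of mappings at page granularity. */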
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Flush a range of PMD-sized (transparent huge page) mappings. */
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif