cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tlb.h (1200B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLB_H
#define _ASM_X86_TLB_H

/* x86 needs no per-VMA setup or teardown work in the mmu_gather path. */
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)

/*
 * Tell asm-generic/tlb.h that x86 supplies its own tlb_flush(); the forward
 * declaration lets the generic header refer to it before the definition below.
 */
#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>

static inline void tlb_flush(struct mmu_gather *tlb)
{
	unsigned long start = 0UL, end = TLB_FLUSH_ALL;
	unsigned int stride_shift = tlb_get_unmap_shift(tlb);

	/* Narrow the flush to the gathered range unless the whole address
	 * space has to be invalidated. */
	if (!tlb->fullmm && !tlb->need_flush_all) {
		start = tlb->start;
		end = tlb->end;
	}

	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
}

/*
 * While the x86 architecture in general requires an IPI to perform a TLB
 * shootdown, enablement code for several hypervisors overrides the
 * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
 * a hypercall. To keep software page-table walkers safe in this case we
 * switch to RCU-based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment
 * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
 * for more details.
 */
static inline void __tlb_remove_table(void *table)
{
	free_page_and_swap_cache(table);
}

#endif /* _ASM_X86_TLB_H */
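
The comment above __tlb_remove_table is why this header pairs with MMU_GATHER_RCU_TABLE_FREE: once remote flushes go through a hypercall instead of an IPI, lockless page-table walkers on other CPUs can no longer rely on the IPI to hold off table freeing, so tables are freed via RCU instead. As a rough sketch only — the my_hv_* names are invented, it assumes CONFIG_PARAVIRT, and depending on kernel version the hook in pv_ops.mmu is called flush_tlb_others or flush_tlb_multi — hypervisor enablement code of the kind the comment refers to looks something like this:

#include <linux/init.h>
#include <linux/cpumask.h>
#include <asm/paravirt.h>	/* pv_ops, CONFIG_PARAVIRT only */
#include <asm/tlbflush.h>	/* struct flush_tlb_info */

/* Hypothetical hypercall wrapper exposed by the hypervisor interface. */
void my_hv_hypercall_flush(const struct cpumask *cpus,
			   const struct flush_tlb_info *info);

static void my_hv_flush_tlb_others(const struct cpumask *cpus,
				   const struct flush_tlb_info *info)
{
	/* One hypercall invalidates the TLBs of all remote vCPUs at once;
	 * no IPI is sent to them. */
	my_hv_hypercall_flush(cpus, info);
}

static void __init my_hv_setup_mmu_ops(void)
{
	/* With no IPI reaching the remote CPUs, their lockless page-table
	 * walkers cannot use IRQ-disabled sections to hold off table
	 * freeing, hence the RCU-based table free selected for x86. */
	pv_ops.mmu.flush_tlb_others = my_hv_flush_tlb_others;
}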