cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tlb.h (4296B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two-step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */

void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size);

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

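/*
 * Defining each name as a macro of itself tells asm-generic/tlb.h,
 * whose fallbacks are guarded by #ifndef, to pick up the s390
 * implementations below instead of the generic ones.
 */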
#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
#define pmd_free_tlb pmd_free_tlb
#define p4d_free_tlb p4d_free_tlb
#define pud_free_tlb pud_free_tlb

#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>

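/*
 * A minimal sketch (the helper below is hypothetical, not part of the
 * original header) of the two-step update described in the comment at the
 * top, assuming the declarations from <asm/pgtable.h> are visible. On
 * s390, ptep_get_and_clear() performs the hardware invalidation (e.g.
 * IPTE under the hood) that must happen before a new value may be stored
 * into an attached, valid pte.
 */
static inline void __example_two_step_pte_update(struct mm_struct *mm,
						 unsigned long addr,
						 pte_t *ptep, pte_t new)
{
	ptep_get_and_clear(mm, addr, ptep);	/* i) invalidate the pte */
	set_pte_at(mm, addr, ptep, new);	/* ii) store the new pte */
}
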
/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	free_page_and_swap_cache(page);
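	/*
	 * The page is freed right away, so report back to the generic
	 * tlb_remove_page_size() that no batch filled up and no
	 * tlb_flush_mmu() call is needed on our behalf.
	 */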
	return false;
}

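/*
 * __tlb_flush_mm_lazy() flushes the whole mm, but only if
 * mm->context.flush_mm is set (e.g. by the *_free_tlb helpers below),
 * and clears the flag again so consecutive gathers flush only once.
 */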
static inline void tlb_flush(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE (combined region
 * and segment table entry) for the page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
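	/*
	 * Record the range and set the s390-private flush_mm flag plus the
	 * generic freed_tables/cleared_pmds state, so the gather performs a
	 * flush before the freed table page can be reused.
	 */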
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_pmds = 1;
	/*
	 * page_table_free_rcu takes care of the allocation bit masks
	 * of the 2K table fragments in the 4K page table page,
	 * then calls tlb_remove_table.
	 */
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two-level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
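	/* undo pgtable_pmd_page_ctor(): release the split ptlock state */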
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_puds = 1;
	tlb_remove_table(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four-level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three-level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	/* adjust the gather range like the other *_free_tlb helpers */
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_p4ds = 1;
	tlb_remove_table(tlb, pud);
}
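
/*
 * A minimal sketch (the helper below is hypothetical, not part of this
 * header) of the mmu_gather cycle that drives the hooks above, assuming
 * the tlb_gather_mmu(tlb, mm) interface of kernels >= 5.13. Between the
 * two calls the generic unmap code invokes tlb_remove_page() and the
 * p??_free_tlb() helpers while walking the page tables.
 */
static inline void __example_gather_cycle(struct mm_struct *mm)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	/* ... zap/unmap walkers run here ... */
	tlb_finish_mmu(&tlb);	/* flush the tlb, then free gathered tables */
}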


#endif /* _S390_TLB_H */