cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fault-armv.c (6776B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include "mm.h"

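/*
 * Memory-type bits used when downgrading aliased shared mappings.
 * The default, L_PTE_MT_BUFFERABLE, disables the cache but keeps the
 * write buffer; check_writebuffer_bugs() below switches this to
 * L_PTE_MT_UNCACHED if the write buffer itself turns out to alias.
 */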
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

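/*
 * On pre-ARMv6 cores the data cache is typically virtually indexed and
 * virtually tagged (VIVT), so two virtual mappings of one physical page
 * can hold separate, inconsistent cache lines.  The functions below
 * resolve such aliases in software when a fault exposes them.
 */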
#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already set up to
	 * fault (i.e., is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the PTE
 * lock here.  Otherwise we are using the shared mm->page_table_lock,
 * which is already held, so we must not take it again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

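/*
 * Walk the page tables down to the PTE covering @address in @vma and,
 * under the (possibly split) PTE lock, apply do_adjust_pte() to it.
 * Returns nonzero if a present PTE was found and checked.
 */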
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}

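/*
 * Find every other user mapping of the page at @pfn within this mm and
 * make it uncacheable.  We look up all VM_MAYSHARE VMAs that map the
 * same file offset (pgoff) and adjust their PTEs; if any alias was
 * found, the faulting PTE itself is downgraded as well, so that all
 * mappings of the page end up with the same memory type.
 */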
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
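		/*
		 * Translate the shared file offset back into a user
		 * virtual address inside this alias mapping.
		 */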
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping_file(page);
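	/*
	 * If PG_dcache_clean was clear, the kernel may have written to
	 * the page through its own mapping; write those cache lines
	 * back before userspace can observe stale data.
	 */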
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
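	/*
	 * With a VIVT data cache the new mapping may alias other user
	 * mappings of the same page, so make them coherent.  Otherwise
	 * only the instruction cache can hold stale lines, and only
	 * executable mappings care about that.
	 */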
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

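	/*
	 * p1 and p2 are two virtual aliases of the same physical word.
	 * Write 1 through p1, then 0 through p2, then read back through
	 * p1.  If the read still returns 1, the buffered write through
	 * the alias was not observed, i.e. the write buffer does not
	 * resolve physical-address aliases.
	 */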
	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

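/*
 * Boot-time probe: map one freshly allocated page twice with the
 * bufferable memory type and run check_writebuffer() on the aliases.
 * If the test fails, or the mappings cannot be set up at all, fall
 * back to mapping shared pages fully uncached.
 */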
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	pr_info("CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		pr_cont("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		pr_cont("ok\n");
	}
}