cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

flush.c (9772B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

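/*
 * Heavyweight memory barrier: on SoCs where a dsb() alone is not
 * enough (e.g. with an outer L2 cache in the path), sync the outer
 * cache and then call any SoC-specific barrier hook registered in
 * soc_mb.
 */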
void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)
		outer_cache.sync();
#endif
	if (soc_mb)
		soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

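/*
 * Map the page at @pfn to a kernel alias that has the same cache
 * colour as the user address @vaddr, so that it indexes the same sets
 * of an aliasing VIPT D-cache, then clean+invalidate that one page
 * with the CP15 D-cache range operation (mcrr ... c14) and drain the
 * write buffer (c7, c10, 4).
 */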
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
	    : "cc");
}

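/*
 * Same colour-matched alias trick for instruction coherency: map the
 * page at a matching colour and run flush_icache_range() over just the
 * affected bytes.
 */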
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}

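/*
 * An mm may be mapped at many different cache colours, so for an
 * aliasing VIPT cache there is no cheap targeted operation: clean and
 * invalidate the entire D-cache (c7, c14, 0), then drain the write
 * buffer (c7, c10, 4).  VIVT caches have their own implementation.
 */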
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}

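/*
 * Single-page variant: on aliasing VIPT only the one colour-matched
 * alias of the page needs flushing, which is much cheaper than the
 * whole-cache operations above.
 */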
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)		do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
#endif

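/*
 * Flags for __flush_ptrace_access(): whether the access targets an
 * executable VMA, and whether the current CPU has the target mm
 * active.
 */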
#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

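/*
 * The usual three cases: VIVT cleans the kernel mapping, but only when
 * this CPU actually runs the target mm; aliasing VIPT flushes one
 * colour-matched alias plus the whole I-cache; non-aliasing VIPT only
 * needs I/D coherency for executable pages, broadcasting an I-cache
 * flush to the other cores when cache operations are not propagated
 * in hardware.
 */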
static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
			   unsigned long len, unsigned int flags)
{
	if (cache_is_vivt()) {
		if (flags & FLAG_PA_CORE_IN_MM) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (flags & FLAG_PA_IS_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	unsigned int flags = 0;
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		flags |= FLAG_PA_CORE_IN_MM;
	if (vma->vm_flags & VM_EXEC)
		flags |= FLAG_PA_IS_EXEC;
	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

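/*
 * The uprobes XOL (execute-out-of-line) slot is always executable and
 * is written by the kernel on behalf of the current task, so both
 * flags are set unconditionally.
 */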
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len)
{
	unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

	__flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}

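/*
 * Highmem pages have no permanent kernel mapping.  On non-aliasing
 * VIPT it is safe to flush through a transient kmap_atomic() mapping;
 * on aliasing caches only an existing kmap mapping (obtained with
 * kmap_high_get()) can hold dirty lines, so flush through that one
 * only if it exists.
 */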
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), page_size(page));
	} else {
		unsigned long i;
		if (cache_is_vipt_nonaliasing()) {
			for (i = 0; i < compound_nr(page); i++) {
				void *addr = kmap_atomic(page + i);
				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
				kunmap_atomic(addr);
			}
		} else {
			for (i = 0; i < compound_nr(page); i++) {
				void *addr = kmap_high_get(page + i);
				if (addr) {
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
					kunmap_high(page + i);
				}
			}
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_SHIFT);
}

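/*
 * Walk every VMA that maps this file page via the mapping's i_mmap
 * interval tree and flush each user alias belonging to the current mm.
 */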
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

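/*
 * Called when a user PTE is set: perform any D-cache flush deferred via
 * PG_dcache_clean, and keep the I-cache coherent for executable
 * mappings.
 */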
#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping_file(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);

	if (pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
		if (test_bit(PG_dcache_clean, &page->flags))
			clear_bit(PG_dcache_clean, &page->flags);
		return;
	}

	mapping = page_mapping_file(page);

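	/*
	 * Lazy path: the page has no user mappings yet, so just clear
	 * PG_dcache_clean and let __sync_icache_dcache() do the flush
	 * when the page is first mapped into userspace.
	 */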
	if (!cache_ops_need_broadcast() &&
	    mapping && !page_mapcount(page))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}