cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

flush.c (3286B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/flush.c
 *
 * Copyright (C) 1995-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cache.h>
#include <asm/tlbflush.h>

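/*
 * Make the I-cache coherent with the D-cache for the range [start, end).
 * With a VIPT-aliasing I-cache, invalidation by virtual address is not
 * sufficient, so the D-cache is cleaned to the Point of Unification and
 * the entire I-cache is invalidated; otherwise the range is cleaned and
 * invalidated to the PoU directly.
 */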
void sync_icache_aliases(unsigned long start, unsigned long end)
{
	if (icache_is_aliasing()) {
		dcache_clean_pou(start, end);
		icache_inval_all_pou();
	} else {
		/*
		 * Don't issue kick_all_cpus_sync() after I-cache invalidation
		 * for user mappings.
		 */
		caches_clean_inval_pou(start, end);
	}
}

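/*
 * Called after the kernel (e.g. ptrace) has written into a page that is
 * mapped by another process: only executable mappings need the I-cache
 * brought back in sync with the D-cache.
 */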
static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
				unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		sync_icache_aliases(start, end);
}

/*
 * Copy user data from/to a page which is mapped into a different process's
 * address space.  Really, we want to allow our "user space" model to handle
 * this.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
}

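/*
 * Called when a user-executable PTE is installed (see set_pte_at()). The
 * I-cache/D-cache synchronisation is deferred until the page is first
 * mapped executable and is tracked with PG_dcache_clean, so each page is
 * only flushed once.
 */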
void __sync_icache_dcache(pte_t pte)
{
	struct page *page = pte_page(pte);

	/*
	 * HugeTLB pages are always fully mapped, so only setting the head
	 * page's PG_dcache_clean flag is enough.
	 */
	if (PageHuge(page))
		page = compound_head(page);

	if (!test_bit(PG_dcache_clean, &page->flags)) {
		sync_icache_aliases((unsigned long)page_address(page),
				    (unsigned long)page_address(page) +
					    page_size(page));
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);

/*
 * This function is called when a page has been modified by the kernel. Mark
 * it as dirty for later flushing when mapped in user space (if executable,
 * see __sync_icache_dcache).
 */
void flush_dcache_page(struct page *page)
{
	/*
	 * Only the head page's flags can be cleared for a HugeTLB page, since
	 * the tail vmemmap pages associated with each HugeTLB page are mapped
	 * read-only when CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP is enabled (see
	 * vmemmap_remap_pte() for details).  Although __sync_icache_dcache()
	 * only sets the PG_dcache_clean flag on the head page struct, more
	 * than one page struct carries PG_dcache_clean for the HugeTLB page
	 * because the head vmemmap page frame is reused (see the comments
	 * above page_fixed_fake_head()).
	 */
	if (hugetlb_optimize_vmemmap_enabled() && PageHuge(page))
		page = compound_head(page);

	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Additional functions defined in assembly.
 */
EXPORT_SYMBOL(caches_clean_inval_pou);

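/*
 * Persistent-memory cache maintenance: arch_wb_cache_pmem() cleans dirty
 * lines to the Point of Persistence so stores reach the pmem media, and
 * arch_invalidate_pmem() invalidates to the Point of Coherency so that
 * subsequent reads fetch the media contents rather than stale lines.
 */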
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
	/* Ensure order against any prior non-cacheable writes */
	dmb(osh);
	dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

void arch_invalidate_pmem(void *addr, size_t size)
{
	dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
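
For context, a minimal sketch (not part of flush.c; the flush_demo_* names are
hypothetical) of how a driver might use flush_dcache_page() from this file:
the page is modified through its kernel mapping and then marked dirty, so that
__sync_icache_dcache() performs the deferred I-cache maintenance if the page
is later mapped executable into user space.

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

static int __init flush_demo_init(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *kaddr;

	if (!page)
		return -ENOMEM;

	/* Modify the page through its kernel mapping. */
	kaddr = kmap_local_page(page);
	memset(kaddr, 0xcc, PAGE_SIZE);
	kunmap_local(kaddr);

	/* Clear PG_dcache_clean so the I-cache sync is deferred to map time. */
	flush_dcache_page(page);

	__free_page(page);
	return 0;
}

static void __exit flush_demo_exit(void)
{
}

module_init(flush_demo_init);
module_exit(flush_demo_exit);
MODULE_LICENSE("GPL");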