cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cacheflush.h (3824B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
      4 *
      5 *  vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
      6 *   -flush_cache_dup_mm (fork)
      7 *   -likewise for flush_cache_mm (exit/execve)
      8 *   -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
      9 *
     10 *  vineetg: April 2008
     11 *   -Added a critical CacheLine flush to copy_to_user_page( ) which
     12 *     was causing gdbserver to not setup breakpoints consistently
     13 */
     14
     15#ifndef _ASM_CACHEFLUSH_H
     16#define _ASM_CACHEFLUSH_H
     17
     18#include <linux/mm.h>
     19#include <asm/shmparam.h>
     20
     21/*
     22 * Semantically we need this because icache doesn't snoop dcache/dma.
     23 * However ARC Cache flush requires paddr as well as vaddr, latter not available
     24 * in the flush_icache_page() API. So we no-op it but do the equivalent work
     25 * in update_mmu_cache()
     26 */
#define flush_icache_page(vma, page)	/* no-op: see rationale above */
     28
void flush_cache_all(void);

/*
 * I$/D$ maintenance primitives.  Per the note above, ARC cache ops
 * need both the physical and the virtual address of the target.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend);
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len);
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr);
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1

void flush_dcache_page(struct page *page);

/* DMA cache ops: writeback+invalidate, invalidate-only, writeback-only */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
void dma_cache_inv(phys_addr_t start, unsigned long sz);
void dma_cache_wback(phys_addr_t start, unsigned long sz);
     43
/* No mapping-level serialization needed here: both are empty statements */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* TBD: optimize this — currently nukes the entire cache on any vmap change */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()
     50
#define flush_cache_dup_mm(mm)			/* called on fork (VIVT only) */

#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING

/* Non-aliasing D$: these can all be no-ops (see vineetg May 2011 note above) */
#define flush_cache_mm(mm)			/* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */
     58
     59#else	/* VIPT aliasing dcache */
     60
     61/* To clear out stale userspace mappings */
     62void flush_cache_mm(struct mm_struct *mm);
     63void flush_cache_range(struct vm_area_struct *vma,
     64	unsigned long start,unsigned long end);
     65void flush_cache_page(struct vm_area_struct *vma,
     66	unsigned long user_addr, unsigned long page);
     67
     68/*
     69 * To make sure that userspace mapping is flushed to memory before
     70 * get_user_pages() uses a kernel mapping to access the page
     71 */
     72#define ARCH_HAS_FLUSH_ANON_PAGE
     73void flush_anon_page(struct vm_area_struct *vma,
     74	struct page *page, unsigned long u_vaddr);
     75
     76#endif	/* CONFIG_ARC_CACHE_VIPT_ALIASING */
     77
     78/*
     79 * A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
     80 * This works around some PIO based drivers which don't call flush_dcache_page
     81 * to record that they dirtied the dcache
     82 */
#define PG_dc_clean	PG_arch_1

/*
 * Cache "color" of an address: the low bits of its page frame number.
 * With 4 colors, a page can index into one of 4 congruence classes of
 * an aliasing VIPT dcache; two mappings alias only if colors match.
 */
#define CACHE_COLORS_NUM	4
#define CACHE_COLORS_MSK	(CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
     88
     89/*
     90 * Simple wrapper over config option
     91 * Bootup code ensures that hardware matches kernel configuration
     92 */
     93static inline int cache_is_vipt_aliasing(void)
     94{
     95	return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
     96}
     97
     98/*
     99 * checks if two addresses (after page aligning) index into same cache set
    100 */
    101#define addr_not_cache_congruent(addr1, addr2)				\
    102({									\
    103	cache_is_vipt_aliasing() ? 					\
    104		(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0;		\
    105})
    106
/*
 * Patch user memory through a kernel mapping (e.g. breakpoint insertion,
 * per the April 2008 note above).  After the copy, executable mappings
 * additionally sync I$ with D$ so stale instructions aren't fetched.
 * NOTE(review): __sync_icache_dcache() is declared taking paddr first,
 * but the kernel vaddr of dst is passed here — presumably resolvable
 * for this mapping; confirm against the implementation.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
do {									\
	memcpy(dst, src, len);						\
	if (vma->vm_flags & VM_EXEC)					\
		__sync_icache_dcache((unsigned long)(dst), vaddr, len);	\
} while (0)
    113
    114#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
    115	memcpy(dst, src, len);						\
    116
    117#endif