cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cachev2.c (2692B) — C-SKY L1 cache maintenance (arch/csky/mm/cachev2.c in the kernel tree)


// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/barrier.h>

/* Command bits for the L1-cache operation register (cr17). */
#define INS_CACHE		(1 << 0)	/* select the instruction cache */
#define DATA_CACHE		(1 << 1)	/* select the data cache */
#define CACHE_INV		(1 << 4)	/* invalidate */
#define CACHE_CLR		(1 << 5)	/* clear (write back) */
#define CACHE_OMS		(1 << 6)	/* operate on one line, addressed via cr22 */

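/* Invalidate the entire I-cache of the calling CPU. */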
void local_icache_inv_all(void *priv)
{
	mtcr("cr17", INS_CACHE|CACHE_INV);
	sync_is();
}

#ifdef CONFIG_CPU_HAS_ICACHE_INS
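/*
 * Fast path: the CPU provides icache.iva, which invalidates one I-cache
 * line by virtual address, so the range can be walked directly.
 */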
void icache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("icache.iva %0\n"::"r"(i):"memory");
	sync_is();
}
#else
struct cache_range {
	unsigned long start;
	unsigned long end;
};

static DEFINE_SPINLOCK(cache_lock);

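/* Select one cache line by address (cr22), then issue the command (cr17). */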
static inline void cache_op_line(unsigned long i, unsigned int val)
{
	mtcr("cr22", i);
	mtcr("cr17", val);
}

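/*
 * Per-CPU slow path: invalidate the range line by line.  The cr22/cr17
 * write pair must not be interleaved, hence the IRQ-safe spinlock.
 */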
void local_icache_inv_range(void *priv)
{
	struct cache_range *param = priv;
	unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
	unsigned long flags;

	spin_lock_irqsave(&cache_lock, flags);

	for (; i < param->end; i += L1_CACHE_BYTES)
		cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);

	spin_unlock_irqrestore(&cache_lock, flags);

	sync_is();
}

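/*
 * Without icache.iva every core has to invalidate its own I-cache, so
 * broadcast the range via on_each_cpu() unless IRQs are already off.
 */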
void icache_inv_range(unsigned long start, unsigned long end)
{
	struct cache_range param = { start, end };

	if (irqs_disabled())
		local_icache_inv_range(&param);
	else
		on_each_cpu(local_icache_inv_range, &param, 1);
}
#endif

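/* Write back (clean) a single D-cache line by virtual address. */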
inline void dcache_wb_line(unsigned long start)
{
	asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
	sync_is();
}

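/* Write back all D-cache lines that intersect [start, end). */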
void dcache_wb_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
	sync_is();
}

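/*
 * Make newly written code visible to instruction fetch: clean the
 * D-cache, then invalidate the I-cache over the same range.
 */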
void cache_wbinv_range(unsigned long start, unsigned long end)
{
	dcache_wb_range(start, end);
	icache_inv_range(start, end);
}
EXPORT_SYMBOL(cache_wbinv_range);

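/* Clean and invalidate (dcache.civa): flush dirty data, then drop the lines. */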
void dma_wbinv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.civa %0\n"::"r"(i):"memory");
	sync_is();
}

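/* Invalidate only (dcache.iva): discard lines, e.g. before a device writes the buffer. */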
void dma_inv_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.iva %0\n"::"r"(i):"memory");
	sync_is();
}

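/* Write back only (dcache.cva): push dirty lines out, e.g. before a device reads the buffer. */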
void dma_wb_range(unsigned long start, unsigned long end)
{
	unsigned long i = start & ~(L1_CACHE_BYTES - 1);

	for (; i < end; i += L1_CACHE_BYTES)
		asm volatile("dcache.cva %0\n"::"r"(i):"memory");
	sync_is();
}
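
A minimal usage sketch (illustrative, not part of this repository): it shows how
driver code on a non-coherent system would typically pair the two DMA helpers
above around a transfer. The buffer handling and the commented-out
dev_start_*() hooks are hypothetical placeholders; only dma_wb_range() and
dma_inv_range() come from cachev2.c.

/* Illustrative sketch: hypothetical driver glue around the helpers above. */
#include <linux/types.h>

void dma_wb_range(unsigned long start, unsigned long end);
void dma_inv_range(unsigned long start, unsigned long end);

/* CPU -> device: clean the D-cache so the device reads the CPU's writes. */
static void example_dma_to_device(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	dma_wb_range(start, start + len);
	/* dev_start_read(buf, len);   hypothetical: device fetches buf */
}

/* Device -> CPU: drop stale lines so later loads re-read fresh memory. */
static void example_dma_from_device(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	dma_inv_range(start, start + len);
	/* dev_start_write(buf, len);  hypothetical: device fills buf */
}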