cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cacheflush.c (2401B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}

void flush_icache_all(void)
{
	local_flush_icache_all();

	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);
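
For reference, the per-hart primitive used above boils down to a single
FENCE.I instruction; in mainline it is defined in
arch/riscv/include/asm/cacheflush.h essentially as follows (reproduced here
for context, not part of this file):

static inline void local_flush_icache_all(void)
{
	/* Synchronize this hart's instruction fetches with prior stores. */
	asm volatile ("fence.i" ::: "memory");
}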

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.  (A sketch of that deferred counterpart
 * follows this function.)
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		sbi_remote_fence_i(&others);
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}
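
The deferred flush referenced above is consumed on the context-switch path:
before a hart resumes execution of an MM whose stale bit is still set, it
flushes its local I$ first. A minimal sketch of that counterpart, modeled on
flush_icache_deferred() from mainline arch/riscv/mm/context.c (an
illustration of the pairing, not code from this file):

static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with the smp_mb() in flush_icache_mm().
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}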

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
/*
 * Flush at most once per page: PG_dcache_clean records that the icache has
 * already been synchronized with this page's contents.
 */
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
#endif /* CONFIG_MMU */
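
The PG_dcache_clean bit set above is cleared again whenever the page's data
is written, so the next executable mapping of the page triggers a fresh
flush. The clearing side, as defined in mainline
arch/riscv/include/asm/cacheflush.h (shown for context, not part of this
file):

static inline void flush_dcache_page(struct page *page)
{
	/* Page data changed: a later executable mapping must flush the I$. */
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}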