cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

context.c (9245B)
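RISC-V MM context-switch code: this file implements the SATP-based ASID allocator used to tag TLB entries per address space, falling back to a full local TLB flush on every switch when the hardware exposes too few ASID bits.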


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_MMU

DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

static unsigned long asid_bits;
static unsigned long num_asids;
static unsigned long asid_mask;

static atomic_long_t current_version;

static DEFINE_RAW_SPINLOCK(context_lock);
static cpumask_t context_tlb_flush_pending;
static unsigned long *context_asid_map;

static DEFINE_PER_CPU(atomic_long_t, active_context);
static DEFINE_PER_CPU(unsigned long, reserved_context);

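/*
 * Walk every CPU's reserved_context slot looking for cntx. If it is found,
 * rewrite each matching slot to newcntx so the reservation follows the mm
 * into the new version; returns true when at least one slot matched.
 */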
static bool check_update_reserved_context(unsigned long cntx,
					  unsigned long newcntx)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved CONTEXT looking for a match.
	 * If we find one, then we can update our mm to use new CONTEXT
	 * (i.e. the same CONTEXT in the current_version) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old CONTEXT are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved CONTEXT in a future
	 * version.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_context, cpu) == cntx) {
			hit = true;
			per_cpu(reserved_context, cpu) = newcntx;
		}
	}

	return hit;
}

static void __flush_context(void)
{
	int i;
	unsigned long cntx;

	/* Must be called with context_lock held */
	lockdep_assert_held(&context_lock);

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(context_asid_map, 0, num_asids);

	/* Mark already active ASIDs as used */
	for_each_possible_cpu(i) {
		cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);
		/*
		 * If this CPU has already been through a rollover, but
		 * hasn't run another task in the meantime, we must preserve
		 * its reserved CONTEXT, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (cntx == 0)
			cntx = per_cpu(reserved_context, i);

		__set_bit(cntx & asid_mask, context_asid_map);
		per_cpu(reserved_context, i) = cntx;
	}

	/* Mark ASID #0 as used because it is used at boot-time */
	__set_bit(0, context_asid_map);

	/* Queue a TLB invalidation for each CPU on next context-switch */
	cpumask_setall(&context_tlb_flush_pending);
}

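/*
 * Allocate a (version | ASID) value for the given mm: reuse the mm's previous
 * ASID when it is still free or still reserved on some CPU, otherwise pick a
 * free bit from context_asid_map, bumping current_version and flushing all
 * ASIDs when the map is exhausted. Caller must hold context_lock.
 */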
static unsigned long __new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	unsigned long cntx = atomic_long_read(&mm->context.id);
	unsigned long asid, ver = atomic_long_read(&current_version);

	/* Must be called with context_lock held */
	lockdep_assert_held(&context_lock);

	if (cntx != 0) {
		unsigned long newcntx = ver | (cntx & asid_mask);

		/*
		 * If our current CONTEXT was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_context(cntx, newcntx))
			return newcntx;

		/*
		 * We had a valid CONTEXT in a previous life, so try to
		 * re-use it if possible.
		 */
		if (!__test_and_set_bit(cntx & asid_mask, context_asid_map))
			return newcntx;
	}

	/*
	 * Allocate a free ASID. If we can't find one then increment
	 * current_version and flush all ASIDs.
	 */
	asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);
	if (asid != num_asids)
		goto set_asid;

	/* We're out of ASIDs, so increment current_version */
	ver = atomic_long_add_return_relaxed(num_asids, &current_version);

	/* Flush everything */
	__flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(context_asid_map, num_asids, 1);

set_asid:
	__set_bit(asid, context_asid_map);
	cur_idx = asid;
	return asid | ver;
}

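/*
 * Fast path: if this mm's context still belongs to current_version, publish
 * it in this CPU's active_context with a relaxed cmpxchg and write SATP
 * directly. Slow path: take context_lock, allocate a fresh context if the
 * version rolled over, and perform any TLB flush queued by __flush_context().
 */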
static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	bool need_flush_tlb = false;
	unsigned long cntx, old_active_cntx;

	cntx = atomic_long_read(&mm->context.id);

	/*
	 * If our active_context is non-zero and the context matches the
	 * current_version, then we update the active_context entry with a
	 * relaxed cmpxchg.
	 *
	 * Following is how we handle racing with a concurrent rollover:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated version.
	 *
	 * - We get a valid context back from the cmpxchg, then we continue
	 *   using the old ASID because __flush_context() would have marked
	 *   the ASID of active_context as used, and we will allocate a new
	 *   context on the next context switch.
	 */
	old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
	if (old_active_cntx &&
	    ((cntx & ~asid_mask) == atomic_long_read(&current_version)) &&
	    atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
					old_active_cntx, cntx))
		goto switch_mm_fast;

	raw_spin_lock_irqsave(&context_lock, flags);

	/* Check that our ASID belongs to the current_version. */
	cntx = atomic_long_read(&mm->context.id);
	if ((cntx & ~asid_mask) != atomic_long_read(&current_version)) {
		cntx = __new_context(mm);
		atomic_long_set(&mm->context.id, cntx);
	}

	if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending))
		need_flush_tlb = true;

	atomic_long_set(&per_cpu(active_context, cpu), cntx);

	raw_spin_unlock_irqrestore(&context_lock, flags);

switch_mm_fast:
	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
		  ((cntx & asid_mask) << SATP_ASID_SHIFT) |
		  satp_mode);

	if (need_flush_tlb)
		local_flush_tlb_all();
}

static void set_mm_noasid(struct mm_struct *mm)
{
	/* Switch the page table and blindly nuke entire local TLB */
	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
	local_flush_tlb_all();
}

static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
{
	if (static_branch_unlikely(&use_asid_allocator))
		set_mm_asid(mm, cpu);
	else
		set_mm_noasid(mm);
}

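/*
 * Probe how many ASID bits the hardware implements by writing an all-ones
 * ASID field to SATP and reading it back, then enable the ASID allocator
 * only when there are enough hardware ASIDs (more than twice the number of
 * possible CPUs).
 */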
static int __init asids_init(void)
{
	unsigned long old;

	/* Figure out the number of ASID bits supported in HW */
	old = csr_read(CSR_SATP);
	asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
	csr_write(CSR_SATP, asid_bits);
	asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
	asid_bits = fls_long(asid_bits);
	csr_write(CSR_SATP, old);

	/*
	 * In the process of determining the number of ASID bits (above)
	 * we polluted the TLB of the current HART, so do a TLB flush
	 * to remove the unwanted TLB entries.
	 */
	local_flush_tlb_all();

	/* Pre-compute ASID details */
	if (asid_bits) {
		num_asids = 1 << asid_bits;
		asid_mask = num_asids - 1;
	}

	/*
	 * Use the ASID allocator only if the number of HW ASIDs is
	 * at least twice the number of possible CPUs.
	 */
	if (num_asids > (2 * num_possible_cpus())) {
		atomic_long_set(&current_version, num_asids);

		context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
		if (!context_asid_map)
			panic("Failed to allocate bitmap for %lu ASIDs\n",
			      num_asids);

		__set_bit(0, context_asid_map);

		static_branch_enable(&use_asid_allocator);

		pr_info("ASID allocator using %lu bits (%lu entries)\n",
			asid_bits, num_asids);
	} else {
		pr_info("ASID allocator disabled (%lu bits)\n", asid_bits);
	}

	return 0;
}
early_initcall(asids_init);
#else
static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
{
	/* Nothing to do here when there is no MMU */
}
#endif

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU.  RISC-V has no direct mechanism for instruction cache
 * shootdowns, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches.  To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, i.e. 'make -j') we avoid the IPIs for harts that are not currently
 * executing an MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart.  This
 * actually performs that local instruction cache flush, which implicitly only
 * refers to the current hart.
 *
 * The "cpu" argument must be the current local CPU number.
 */
static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu)
{
#ifdef CONFIG_SMP
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}

#endif
}

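/*
 * Context-switch hook: update mm_cpumask for the old and new mm, program
 * SATP for the next mm via set_mm(), and perform any deferred icache flush
 * that was queued for this hart while it was running another mm.
 */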
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task)
{
	unsigned int cpu;

	if (unlikely(prev == next))
		return;

	/*
	 * Mark the current MM context as inactive, and the next as
	 * active.  This is at least used by the icache flushing
	 * routines in order to determine who should be flushed.
	 */
	cpu = smp_processor_id();

	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	set_mm(next, cpu);

	flush_icache_deferred(next, cpu);
}