cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

arch/powerpc/include/asm/paravirt.h (4468B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so then
 * recursing on that lock will cause the task to then queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
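/*
 * Illustrative caller pattern (a sketch, not part of this header): the
 * pseries spinlock slowpaths sample the owner's yield count and confer
 * only when bit 0 says the owner is not currently dispatched:
 *
 *	u32 yield_count = yield_count_of(owner_cpu);
 *
 *	if (yield_count & 1)
 *		yield_to_preempted(owner_cpu, yield_count);
 *
 * Passing the sampled count lets the hypervisor discard the confer if
 * the owner was dispatched again in the meantime.
 */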

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

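/*
 * Without CONFIG_PPC_SPLPAR these helpers must never be called. The
 * ___bad_* externs below are deliberately left undefined, so any call
 * the compiler cannot prove unreachable becomes a link-time error
 * rather than a silent runtime no-op.
 */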
extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	/*
	 * The dispatch/yield bit alone is an imperfect indicator of
	 * whether the hypervisor has dispatched @cpu to run on a physical
	 * processor. When it is clear, @cpu is definitely not preempted.
	 * But when it is set, it means only that it *might* be, subject to
	 * other conditions. So we check other properties of the VM and
	 * @cpu first, resorting to the yield count last.
	 */

	/*
	 * Hypervisor preemption isn't possible in dedicated processor
	 * mode by definition.
	 */
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu;

		/*
		 * The result of vcpu_is_preempted() is used in a
		 * speculative way, and is always subject to invalidation
		 * by events internal and external to Linux. While we can
		 * be called in preemptable context (in the Linux sense),
		 * we're not accessing per-cpu resources in a way that can
		 * race destructively with Linux scheduler preemption and
		 * migration, and callers can tolerate the potential for
		 * error introduced by sampling the CPU index without
		 * pinning the task to it. So it is permissible to use
		 * raw_smp_processor_id() here to defeat the preempt debug
		 * warnings that can arise from using smp_processor_id()
		 * in arbitrary contexts.
		 */
		first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());

		/*
		 * The PowerVM hypervisor dispatches VMs on a whole core
		 * basis. So we know that a thread sibling of the local CPU
		 * cannot have been preempted by the hypervisor, even if it
		 * has called H_CONFER, which will set the yield bit.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
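/*
 * Illustrative consumer (a sketch, not taken from this header): generic
 * locking code uses vcpu_is_preempted() to give up optimistic spinning
 * on a lock owner whose vCPU is not running, along the lines of:
 *
 *	if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *		break;
 */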
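/*
 * In dedicated processor mode the native spin unlock path remains in
 * use; only shared processor LPARs take the paravirt unlock path.
 */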
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */