cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

preempt.h (3218B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef __ASM_PREEMPT_H
      3#define __ASM_PREEMPT_H
      4
      5#include <asm/current.h>
      6#include <linux/thread_info.h>
      7#include <asm/atomic_ops.h>
      8
      9#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
     10
      11/* We use the MSB mostly because it's available */
     12#define PREEMPT_NEED_RESCHED	0x80000000
     13#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
     14
/*
 * Return the preemption count proper: mask off the PREEMPT_NEED_RESCHED
 * MSB, which encodes the (inverted) need-resched flag rather than being
 * part of the numeric count.
 */
static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
}
     19
/*
 * Set the preemption count to @pc while preserving the current state of
 * the PREEMPT_NEED_RESCHED bit.  A cmpxchg loop is needed so that a
 * concurrent update of the flag bit between the read and the store is
 * not lost: on contention we re-read and retry.
 */
static inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = READ_ONCE(S390_lowcore.preempt_count);
		/* keep the flag bit from @old, take the count from @pc */
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (__atomic_cmpxchg(&S390_lowcore.preempt_count,
				  old, new) != old);
}
     31
/*
 * Mark that a reschedule is needed.  The flag is stored inverted
 * (PREEMPT_ENABLED has the bit set), so "set" means atomically
 * CLEARING the PREEMPT_NEED_RESCHED bit.
 */
static inline void set_preempt_need_resched(void)
{
	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
     36
/*
 * Clear the need-resched condition.  Inverse of set_preempt_need_resched():
 * because the flag is stored inverted, "clear" atomically SETS the
 * PREEMPT_NEED_RESCHED bit again.
 */
static inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
}
     41
/*
 * Return true if a reschedule is pending.  With the inverted encoding,
 * the bit being CLEAR means need-resched, hence the negation.
 */
static inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
}
     46
/*
 * Atomically add @val to the preempt count.  Small compile-time
 * constants in the signed-byte range can use the cheaper
 * __atomic_add_const variant; everything else falls through to the
 * generic atomic add.
 */
static inline void __preempt_count_add(int val)
{
	/*
	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
	 * enabled, gcc 12 fails to handle __builtin_constant_p().
	 */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
			__atomic_add_const(val, &S390_lowcore.preempt_count);
			return;
		}
	}
	__atomic_add(val, &S390_lowcore.preempt_count);
}
     61
/* Atomically subtract @val from the preempt count. */
static inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}
     66
/*
 * Decrement the preempt count and report whether preemption should
 * happen now.  __atomic_add() returns the OLD value; old == 1 means the
 * count just dropped to zero AND the PREEMPT_NEED_RESCHED bit was
 * already clear (i.e. a resched is pending, given the inverted
 * encoding) — both conditions folded into a single compare.
 */
static inline bool __preempt_count_dec_and_test(void)
{
	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
}
     71
/*
 * True if rescheduling is due at the given @preempt_offset.  A plain
 * equality works because the need-resched flag is folded into the word:
 * the raw value equals @preempt_offset only when the count matches and
 * the PREEMPT_NEED_RESCHED bit is clear (resched pending).
 */
static inline bool should_resched(int preempt_offset)
{
	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
			preempt_offset);
}
     77
     78#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
     79
     80#define PREEMPT_ENABLED	(0)
     81
/*
 * Pre-z196 fallback: no need-resched bit is folded into the word, so
 * the stored value is the count itself.
 */
static inline int preempt_count(void)
{
	return READ_ONCE(S390_lowcore.preempt_count);
}
     86
/* Set the preempt count; plain store, no flag bit to preserve here. */
static inline void preempt_count_set(int pc)
{
	S390_lowcore.preempt_count = pc;
}
     91
/*
 * No-op in the fallback: the need-resched state lives in the thread
 * flags (see tif_need_resched() users below), not in preempt_count.
 */
static inline void set_preempt_need_resched(void)
{
}
     95
/* No-op in the fallback, for the same reason as set_preempt_need_resched(). */
static inline void clear_preempt_need_resched(void)
{
}
     99
/* Always false: preempt_count carries no need-resched bit in the fallback. */
static inline bool test_preempt_need_resched(void)
{
	return false;
}
    104
/*
 * Add @val to the preempt count.  Non-atomic read-modify-write; per the
 * z196-variant above this word is per-CPU lowcore state, so a plain
 * update suffices here.
 */
static inline void __preempt_count_add(int val)
{
	S390_lowcore.preempt_count += val;
}
    109
/* Subtract @val from the preempt count (non-atomic, like the add). */
static inline void __preempt_count_sub(int val)
{
	S390_lowcore.preempt_count -= val;
}
    114
/*
 * Decrement the count; report true when it reaches zero AND the
 * thread-flag says a reschedule is wanted.  The flag check is explicit
 * here because it is not folded into the count word in this variant.
 */
static inline bool __preempt_count_dec_and_test(void)
{
	return !--S390_lowcore.preempt_count && tif_need_resched();
}
    119
/*
 * True if rescheduling is due at @preempt_offset: count matches the
 * offset and the need-resched thread flag is set (checked separately,
 * unlike the z196 variant where one compare covers both).
 */
static inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}
    125
    126#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
    127
    128#define init_task_preempt_count(p)	do { } while (0)
    129/* Deferred to CPU bringup time */
    130#define init_idle_preempt_count(p, cpu)	do { } while (0)
    131
    132#ifdef CONFIG_PREEMPTION
    133extern void preempt_schedule(void);
    134#define __preempt_schedule() preempt_schedule()
    135extern void preempt_schedule_notrace(void);
    136#define __preempt_schedule_notrace() preempt_schedule_notrace()
    137#endif /* CONFIG_PREEMPTION */
    138
    139#endif /* __ASM_PREEMPT_H */