cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

idle.h (1890B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

/*
 * Idle classification used by the scheduler's load balancer: CPU_IDLE
 * for a periodically balancing idle CPU, CPU_NOT_IDLE for a busy CPU,
 * and CPU_NEWLY_IDLE for balancing triggered by a CPU about to go idle.
 */
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/* Kick @cpu out of its idle loop if it is currently idle (SMP only). */
#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

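/*
 * Illustrative timeline of the store/load pairing the barrier above
 * enforces:
 *
 *	CPU0 (idle entry)		CPU1 (resched_curr())
 *	set TIF_POLLING_NRFLAG		set TIF_NEED_RESCHED
 *	smp_mb__after_atomic()		full barrier (atomic RMW)
 *	test TIF_NEED_RESCHED		test TIF_POLLING_NRFLAG
 *
 * At least one side observes the other's store: either CPU0 sees
 * NEED_RESCHED and aborts the idle entry, or CPU1 sees POLLING and can
 * skip sending a rescheduling IPI.
 */
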
static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
/*
 * Without TIF_POLLING_NRFLAG an idle CPU cannot be woken by merely
 * setting a flag it polls, so a rescheduling IPI is always required and
 * there is no polling state to track.
 */
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold it into the preempt count.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */
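
The handshake these helpers encode generalizes beyond the kernel. Below is a minimal userspace sketch of the same pattern, assuming C11 seq_cst atomics stand in for the thread-info flags and barriers; every name in it (poll_flag, need_resched, resched_remote, the printf standing in for an IPI) is hypothetical and not kernel API.

/*
 * Userspace analogue of the polling handshake in idle.h, using C11
 * seq_cst atomics in place of thread flags and smp_mb*().
 * Build: cc -std=c11 -pthread poll_demo.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static atomic_bool poll_flag;     /* stands in for TIF_POLLING_NRFLAG */
static atomic_bool need_resched;  /* stands in for TIF_NEED_RESCHED */

/* Idle side: mirrors current_set_polling_and_test(). */
static bool set_polling_and_test(void)
{
	atomic_store(&poll_flag, true);
	/* seq_cst orders the flag store before the test, mirroring
	 * smp_mb__after_atomic() in the kernel code. */
	return atomic_load(&need_resched);
}

/* Waker side: mirrors the resched_curr() pairing. Returns true if an
 * "IPI" would have been required. */
static bool resched_remote(void)
{
	atomic_store(&need_resched, true);
	/* Paired ordering: need_resched is visible before we test
	 * poll_flag, so at least one side sees the other's store. */
	return !atomic_load(&poll_flag);
}

static void *idle_thread(void *arg)
{
	(void)arg;
	if (set_polling_and_test()) {
		printf("idle: need_resched already set, not idling\n");
		atomic_store(&poll_flag, false);
		return NULL;
	}
	/* Poll instead of sleeping; a real idle loop would mwait here. */
	while (!atomic_load(&need_resched))
		;
	printf("idle: woken by polled flag, no IPI needed\n");
	atomic_store(&poll_flag, false);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, idle_thread, NULL);
	if (resched_remote())
		printf("waker: target not polling, would send IPI\n");
	else
		printf("waker: target polling, skipped IPI\n");
	pthread_join(t, NULL);
	return 0;
}

The design point the barriers encode: because each side stores its own flag before loading the other's, at least one side must observe the other's store, so the waker can safely skip the IPI exactly when it sees the polling bit set.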