cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

process.h (1094B)


/* SPDX-License-Identifier: GPL-2.0 */
//
// Code shared between 32 and 64 bit

#include <asm/spec-ctrl.h>

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);

/*
 * This needs to be inline to optimize for the common case where no extra
 * work needs to be done.
 */
static inline void switch_to_extra(struct task_struct *prev,
				   struct task_struct *next)
{
	unsigned long next_tif = read_task_thread_flags(next);
	unsigned long prev_tif = read_task_thread_flags(prev);

	if (IS_ENABLED(CONFIG_SMP)) {
		/*
		 * Avoid __switch_to_xtra() invocation when conditional
		 * STIBP is disabled and the only different bit is
		 * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
		 * in the TIF_WORK_CTXSW masks.
		 */
		if (!static_branch_likely(&switch_to_cond_stibp)) {
			prev_tif &= ~_TIF_SPEC_IB;
			next_tif &= ~_TIF_SPEC_IB;
		}
	}

	/*
	 * __switch_to_xtra() handles debug registers, i/o bitmaps,
	 * speculation mitigations etc.
	 */
	if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
		     prev_tif & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev, next);
}