cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

entry-kvm.h (2807B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYKVM_H
#define __LINUX_ENTRYKVM_H

#include <linux/static_call_types.h>
#include <linux/resume_user_mode.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <linux/tick.h>

/* Transfer to guest mode work */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK

#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
# define ARCH_XFER_TO_GUEST_MODE_WORK	(0)
#endif

#define XFER_TO_GUEST_MODE_WORK						\
	(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL |	\
	 _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)

struct kvm_vcpu;

/**
 * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
 *					 mode work handling function.
 * @vcpu:	Pointer to current's VCPU data
 * @ti_work:	Cached TIF flags gathered in xfer_to_guest_mode_handle_work()
 *
 * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
 * replaced by architecture specific code.
 */
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
						      unsigned long ti_work);

#ifndef arch_xfer_to_guest_mode_work
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
						      unsigned long ti_work)
{
	return 0;
}
#endif

/**
 * xfer_to_guest_mode_handle_work - Check and handle pending work which needs
 *				    to be handled before going to guest mode
 * @vcpu:	Pointer to current's VCPU data
 *
 * Returns: 0 or an error code
 */
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);

/**
 * xfer_to_guest_mode_prepare - Perform last minute preparation work that
 *				needs to be handled while IRQs are disabled
 *				upon entering the guest.
 *
 * Has to be invoked with interrupts disabled before the last call
 * to xfer_to_guest_mode_work_pending().
 */
static inline void xfer_to_guest_mode_prepare(void)
{
	lockdep_assert_irqs_disabled();
	tick_nohz_user_enter_prepare();
}

/**
 * __xfer_to_guest_mode_work_pending - Check if work is pending
 *
 * Returns: True if work pending, False otherwise.
 *
 * Bare variant of xfer_to_guest_mode_work_pending(). Can be called from
 * interrupt enabled code for racy quick checks with care.
 */
static inline bool __xfer_to_guest_mode_work_pending(void)
{
	unsigned long ti_work = read_thread_flags();

	return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
}

/**
 * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be
 *				     handled before returning to guest mode
 *
 * Returns: True if work pending, False otherwise.
 *
 * Has to be invoked with interrupts disabled before the transition to
 * guest mode.
 */
static inline bool xfer_to_guest_mode_work_pending(void)
{
	lockdep_assert_irqs_disabled();
	return __xfer_to_guest_mode_work_pending();
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

#endif
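
For context, the sketch below (not part of entry-kvm.h) shows how an architecture's vCPU run loop would typically consume this API: a racy pre-check and work handling while interrupts are still enabled, then the authoritative check with interrupts disabled right before guest entry. The loop shape and the enter_guest() helper are assumptions for illustration; the real call sites live in the per-architecture KVM code.

/*
 * Illustrative only: vcpu_run_sketch() and enter_guest() are hypothetical
 * names, not functions from this tree.
 */
static int vcpu_run_sketch(struct kvm_vcpu *vcpu)
{
	int ret;

	for (;;) {
		/*
		 * Cheap, racy check with interrupts enabled: handle pending
		 * work (reschedule, signals, notify-resume, ...) before
		 * committing to guest entry.
		 */
		if (__xfer_to_guest_mode_work_pending()) {
			ret = xfer_to_guest_mode_handle_work(vcpu);
			if (ret)
				return ret;	/* e.g. -EINTR on a pending signal */
		}

		local_irq_disable();
		xfer_to_guest_mode_prepare();

		/*
		 * Final, non-racy check with IRQs disabled: if work became
		 * pending after the racy check, back out and loop again.
		 */
		if (xfer_to_guest_mode_work_pending()) {
			local_irq_enable();
			continue;
		}

		ret = enter_guest(vcpu);	/* hypothetical arch-specific entry */
		local_irq_enable();

		if (ret)
			return ret;
	}
}

An architecture that needs extra work handled on guest entry can hook in by defining arch_xfer_to_guest_mode_work and supplying its own arch_xfer_to_guest_mode_handle_work() before this header is included, plus ARCH_XFER_TO_GUEST_MODE_WORK for any additional TIF bits it wants folded into XFER_TO_GUEST_MODE_WORK.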