cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

svm_ops.h (1575B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include "x86.h"

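/*
 * The svm_asm* macros below emit a single SVM instruction with zero to two
 * operands and attach an exception table entry (_ASM_EXTABLE) to it: if the
 * instruction faults, execution is redirected to the 'fault' label, which
 * reports the unexpected fault via kvm_spurious_fault() instead of oopsing.
 * asm_volatile_goto() is required because the asm can jump to a C label.
 */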
#define svm_asm(insn, clobber...)				\
do {								\
	asm_volatile_goto("1: " __stringify(insn) "\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])		\
			  ::: clobber : fault);			\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm1(insn, op1, clobber...)				\
do {								\
	asm_volatile_goto("1: "  __stringify(insn) " %0\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])		\
			  :: op1 : clobber : fault);		\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)				\
do {									\
	asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  :: op1, op2 : clobber : fault);		\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)
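/*
 * For illustration (this comment is not part of the upstream header): with
 * svm_asm1, vmsave(pa) below expands to roughly
 *
 *	asm_volatile_goto("1: vmsave %0\n\t"
 *			  _ASM_EXTABLE(1b, %l[fault])
 *			  :: "a" (pa) : "memory" : fault);
 *
 * i.e. VMSAVE taking rAX as its sole operand, plus an exception table
 * entry that routes any fault on it to kvm_spurious_fault().
 */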
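/*
 * CLGI clears and STGI sets the Global Interrupt Flag (GIF). While GIF is
 * clear, physical interrupts, NMIs and SMIs are held pending; KVM brackets
 * VMRUN with clgi()/stgi() so host interrupt delivery is suspended across
 * the world switch.
 */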
static inline void clgi(void)
{
	svm_asm(clgi);
}

static inline void stgi(void)
{
	svm_asm(stgi);
}

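/*
 * INVLPGA invalidates the TLB mapping for the virtual address in rAX,
 * restricted to the guest ASID given in ECX, hence the "a" and "c"
 * register constraints below.
 */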
static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}

/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */
static __always_inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}

static __always_inline void vmload(unsigned long pa)
{
	svm_asm1(vmload, "a" (pa), "memory");
}
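/*
 * Hypothetical usage sketch (names are illustrative, not from this file):
 * VMSAVE/VMLOAD transfer the segment-related host state (FS/GS/TR/LDTR,
 * KernelGsBase, the SYSCALL/SYSENTER MSRs) to/from the save area at
 * physical address 'pa', e.g. around a guest run:
 *
 *	vmsave(host_save_area_pa);	// stash host segment state
 *	// ... VMRUN / guest executes ...
 *	vmload(host_save_area_pa);	// restore host segment state
 */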

#endif /* __KVM_X86_SVM_OPS_H */