cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

asm-prototypes.h (2796B)


      1/* SPDX-License-Identifier: GPL-2.0-or-later */
      2#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
      3#define _ASM_POWERPC_ASM_PROTOTYPES_H
      4/*
      5 * This file is for prototypes of C functions that are only called
      6 * from asm, and any associated variables.
      7 *
      8 * Copyright 2016, Daniel Axtens, IBM Corporation.
      9 */
     10
     11#include <linux/threads.h>
     12#include <asm/cacheflush.h>
     13#include <asm/checksum.h>
     14#include <linux/uaccess.h>
     15#include <asm/epapr_hcalls.h>
     16#include <asm/dcr.h>
     17#include <asm/mmu_context.h>
     18#include <asm/ultravisor-api.h>
     19
     20#include <uapi/asm/ucontext.h>
     21
     22/* Ultravisor */
     23#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
     24long ucall_norets(unsigned long opcode, ...);
     25#else
     26static inline long ucall_norets(unsigned long opcode, ...)
     27{
     28	return U_NOT_AVAILABLE;
     29}
     30#endif
     31
     32/* OPAL */
     33int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
     34		    int64_t a4, int64_t a5, int64_t a6, int64_t a7,
     35		    int64_t opcode, uint64_t msr);
     36
     37/* prom_init (OpenFirmware) */
     38unsigned long __init prom_init(unsigned long r3, unsigned long r4,
     39			       unsigned long pp,
     40			       unsigned long r6, unsigned long r7,
     41			       unsigned long kbase);
     42
     43/* misc runtime */
     44extern u64 __bswapdi2(u64);
     45extern s64 __lshrdi3(s64, int);
     46extern s64 __ashldi3(s64, int);
     47extern s64 __ashrdi3(s64, int);
     48extern int __cmpdi2(s64, s64);
     49extern int __ucmpdi2(u64, u64);
     50
     51/* tracing */
     52void _mcount(void);
     53
     54/* Transaction memory related */
     55void tm_enable(void);
     56void tm_disable(void);
     57void tm_abort(uint8_t cause);
     58
     59struct kvm_vcpu;
     60void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
     61void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
     62
     63/* Patch sites */
     64extern s32 patch__call_flush_branch_caches1;
     65extern s32 patch__call_flush_branch_caches2;
     66extern s32 patch__call_flush_branch_caches3;
     67extern s32 patch__flush_count_cache_return;
     68extern s32 patch__flush_link_stack_return;
     69extern s32 patch__call_kvm_flush_link_stack;
     70extern s32 patch__call_kvm_flush_link_stack_p9;
     71extern s32 patch__memset_nocache, patch__memcpy_nocache;
     72
     73extern long flush_branch_caches;
     74extern long kvm_flush_link_stack;
     75
     76#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
     77void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
     78void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
     79#else
     80static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
     81				     bool preserve_nv) { }
     82static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
     83					bool preserve_nv) { }
     84#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
     85
     86void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
     87
     88long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
     89long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
     90			unsigned long dabrx);
     91
     92#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */