cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kvm-s390.h (16330B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))

extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;

#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
	debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
			    "%d: " d_string "\n", (d_kvm)->userspace_pid, \
			    d_args); \
} while (0)

#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)
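
/*
 * Illustrative sketch (not part of the original header): how the event
 * macros above are typically used. The numeric argument is the s390 debug
 * feature loglevel; the format string and arguments are forwarded to
 * debug_sprintf_event(), and VCPU_EVENT additionally prefixes the vcpu id
 * and the guest PSW.
 */
static inline void kvm_s390_example_trace_halt(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "example: vcpu halted");
	KVM_EVENT(3, "example event for vcpu %d", vcpu->vcpu_id);
}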

static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}

#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
		   prefix);
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}
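
/*
 * Added worked example (not in the original header): the SIE block stores
 * the guest prefix shifted right by GUEST_PREFIX_SHIFT, e.g. a prefix of
 * 0x4a000 is kept as 0x4a000 >> 13 = 0x25 and read back by
 * kvm_s390_get_prefix() as 0x25 << 13 = 0x4a000.
 */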

static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      u8 *ar_b1, u8 *ar_b2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	/* The displacement is a 20bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
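
/*
 * Added worked example (not in the original header): for an RSY-format
 * instruction with DL2 = 0xffc and DH2 = 0xff, the two ipb fields above
 * combine to disp2 = 0xff000 + 0xffc = 0xffffc; bit 19 is set, so the sign
 * extension yields 0xfffffffc, i.e. -4 after the (long)(int) cast in the
 * return statement.
 */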

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac_mask) &&
		__test_facility(nr, kvm->arch.model.fac_list);
}

static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}
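
/*
 * Added worked example (not in the original header): facility bits are
 * numbered big-endian within each byte, so set_kvm_facility(fac_list, 76)
 * touches byte 76 >> 3 = 9 of the list and ORs in mask
 * 0x80 >> (76 & 7) = 0x08.
 */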

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
	WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
	return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
{
	if (kvm->arch.user_cpu_state_ctrl)
		return;

	VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control");
	kvm->arch.user_cpu_state_ctrl = 1;
}

/* get the end gfn of the last (highest gfn) memslot */
static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
{
	struct rb_node *node;
	struct kvm_memory_slot *ms;

	if (WARN_ON(kvm_memslots_empty(slots)))
		return 0;

	node = rb_last(&slots->gfn_tree);
	ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
	return ms->base_gfn + ms->npages;
}

static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
{
	u32 gd = (u32)(u64)kvm->arch.gisa_int.origin;

	if (gd && sclp.has_gisaf)
		gd |= GISA_FORMAT1;
	return gd;
}

/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);

static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
	return kvm->arch.pv.handle;
}

static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv.handle;
}

static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->lock);
	return !!kvm_s390_pv_get_handle(kvm);
}

static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
	lockdep_assert_held(&vcpu->mutex);
	return !!kvm_s390_pv_cpu_get_handle(vcpu);
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm = *pgm_info,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = code,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
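/*
 * Illustrative sketch (not part of the original header): instruction
 * handlers commonly reject malformed operands by injecting a program
 * check with kvm_s390_inject_program_int(), e.g. a specification
 * exception (PGM_SPECIFICATION, pulled in via asm/ptrace.h) for an
 * unaligned operand address.
 */
static inline int kvm_s390_example_check_alignment(struct kvm_vcpu *vcpu,
						   u64 operand)
{
	if (operand & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}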
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
	u64 rc;

	preempt_disable();
	rc = get_tod_clock_fast() + kvm->arch.epoch;
	preempt_enable();
	return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}
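
/*
 * Illustrative sketch (not part of the original header), expanding on the
 * pattern in the comment above: a hypothetical handler that propagates
 * internal errors (rc < 0) unchanged and converts guest access exceptions
 * (rc > 0) into an injected program check. "example_access_guest" stands in
 * for a real guest access helper such as write_guest() from gaccess.h.
 */
static inline int kvm_s390_example_handle_store(struct kvm_vcpu *vcpu,
						int (*example_access_guest)(struct kvm_vcpu *vcpu))
{
	int rc = example_access_guest(vcpu);

	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	kvm_s390_set_psw_cc(vcpu, 0);	/* success: condition code 0 */
	return 0;
}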

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
			struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
void kvm_s390_gisa_disable(struct kvm *kvm);
void kvm_s390_gisa_enable(struct kvm *kvm);
int kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

	return &sca->ipte_control;
}
static inline int kvm_s390_use_sca_entries(void)
{
	/*
	 * Without SIGP interpretation, only SRS interpretation (if available)
	 * might use the entries. By not setting the entries and keeping them
	 * invalid, hardware will not access them but intercept.
	 */
	return sclp.has_sigpif;
}
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info);

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the vcpus
 * are running as each vcpu will be removed from SIE before resetting the crypt
 * attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Set the maximum number of diag9c forwarding per second
 */
extern unsigned int diag9c_forwarding_hz;

#endif