cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xen.c (50778B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
      4 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
      5 *
      6 * KVM Xen emulation
      7 */
      8
      9#include "x86.h"
     10#include "xen.h"
     11#include "hyperv.h"
     12#include "lapic.h"
     13
     14#include <linux/eventfd.h>
     15#include <linux/kvm_host.h>
     16#include <linux/sched/stat.h>
     17
     18#include <trace/events/kvm.h>
     19#include <xen/interface/xen.h>
     20#include <xen/interface/vcpu.h>
     21#include <xen/interface/version.h>
     22#include <xen/interface/event_channel.h>
     23#include <xen/interface/sched.h>
     24
     25#include "trace.h"
     26
     27static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
     28static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
     29static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);
     30
     31DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
     32
     33static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
     34{
     35	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
     36	struct pvclock_wall_clock *wc;
     37	gpa_t gpa = gfn_to_gpa(gfn);
     38	u32 *wc_sec_hi;
     39	u32 wc_version;
     40	u64 wall_nsec;
     41	int ret = 0;
     42	int idx = srcu_read_lock(&kvm->srcu);
     43
     44	if (gfn == GPA_INVALID) {
     45		kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
     46		goto out;
     47	}
     48
     49	do {
     50		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
     51						gpa, PAGE_SIZE);
     52		if (ret)
     53			goto out;
     54
     55		/*
     56		 * This code mirrors kvm_write_wall_clock() except that it writes
     57		 * directly through the pfn cache and doesn't mark the page dirty.
     58		 */
     59		wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
     60
     61		/* It could be invalid again already, so we need to check */
     62		read_lock_irq(&gpc->lock);
     63
     64		if (gpc->valid)
     65			break;
     66
     67		read_unlock_irq(&gpc->lock);
     68	} while (1);
     69
     70	/* Paranoia checks on the 32-bit struct layout */
     71	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
     72	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
     73	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
     74
     75#ifdef CONFIG_X86_64
     76	/* Paranoia checks on the 64-bit struct layout */
     77	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
     78	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);
     79
     80	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
     81		struct shared_info *shinfo = gpc->khva;
     82
     83		wc_sec_hi = &shinfo->wc_sec_hi;
     84		wc = &shinfo->wc;
     85	} else
     86#endif
     87	{
     88		struct compat_shared_info *shinfo = gpc->khva;
     89
     90		wc_sec_hi = &shinfo->arch.wc_sec_hi;
     91		wc = &shinfo->wc;
     92	}
     93
     94	/* Increment and ensure an odd value */
     95	wc_version = wc->version = (wc->version + 1) | 1;
     96	smp_wmb();
     97
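        	/*
        	 * do_div() reduces wall_nsec to whole seconds in place and
        	 * returns the nanosecond remainder, so the guest's wallclock is
        	 * split into sec/sec_hi/nsec from one 64-bit nanosecond count.
        	 */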
     98	wc->nsec = do_div(wall_nsec,  1000000000);
     99	wc->sec = (u32)wall_nsec;
    100	*wc_sec_hi = wall_nsec >> 32;
    101	smp_wmb();
    102
    103	wc->version = wc_version + 1;
    104	read_unlock_irq(&gpc->lock);
    105
    106	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
    107
    108out:
    109	srcu_read_unlock(&kvm->srcu, idx);
    110	return ret;
    111}
    112
    113void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
    114{
    115	if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
    116		struct kvm_xen_evtchn e;
    117
    118		e.vcpu_id = vcpu->vcpu_id;
    119		e.vcpu_idx = vcpu->vcpu_idx;
    120		e.port = vcpu->arch.xen.timer_virq;
    121		e.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
    122
    123		kvm_xen_set_evtchn(&e, vcpu->kvm);
    124
    125		vcpu->arch.xen.timer_expires = 0;
    126		atomic_set(&vcpu->arch.xen.timer_pending, 0);
    127	}
    128}
    129
    130static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
    131{
    132	struct kvm_vcpu *vcpu = container_of(timer, struct kvm_vcpu,
    133					     arch.xen.timer);
    134	if (atomic_read(&vcpu->arch.xen.timer_pending))
    135		return HRTIMER_NORESTART;
    136
    137	atomic_inc(&vcpu->arch.xen.timer_pending);
    138	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
    139	kvm_vcpu_kick(vcpu);
    140
    141	return HRTIMER_NORESTART;
    142}
    143
    144static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs, s64 delta_ns)
    145{
    146	atomic_set(&vcpu->arch.xen.timer_pending, 0);
    147	vcpu->arch.xen.timer_expires = guest_abs;
    148
    149	if (delta_ns <= 0) {
    150		xen_timer_callback(&vcpu->arch.xen.timer);
    151	} else {
    152		ktime_t ktime_now = ktime_get();
    153		hrtimer_start(&vcpu->arch.xen.timer,
    154			      ktime_add_ns(ktime_now, delta_ns),
    155			      HRTIMER_MODE_ABS_HARD);
    156	}
    157}
    158
    159static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
    160{
    161	hrtimer_cancel(&vcpu->arch.xen.timer);
    162	vcpu->arch.xen.timer_expires = 0;
    163	atomic_set(&vcpu->arch.xen.timer_pending, 0);
    164}
    165
    166static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
    167{
    168	hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
    169		     HRTIMER_MODE_ABS_HARD);
    170	vcpu->arch.xen.timer.function = xen_timer_callback;
    171}
    172
    173static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
    174{
    175	struct kvm_vcpu_xen *vx = &v->arch.xen;
    176	u64 now = get_kvmclock_ns(v->kvm);
    177	u64 delta_ns = now - vx->runstate_entry_time;
    178	u64 run_delay = current->sched_info.run_delay;
    179
    180	if (unlikely(!vx->runstate_entry_time))
    181		vx->current_runstate = RUNSTATE_offline;
    182
    183	/*
    184	 * Time waiting for the scheduler isn't "stolen" if the
    185	 * vCPU wasn't running anyway.
    186	 */
    187	if (vx->current_runstate == RUNSTATE_running) {
    188		u64 steal_ns = run_delay - vx->last_steal;
    189
    190		delta_ns -= steal_ns;
    191
    192		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
    193	}
    194	vx->last_steal = run_delay;
    195
    196	vx->runstate_times[vx->current_runstate] += delta_ns;
    197	vx->current_runstate = state;
    198	vx->runstate_entry_time = now;
    199}
    200
    201void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
    202{
    203	struct kvm_vcpu_xen *vx = &v->arch.xen;
    204	struct gfn_to_pfn_cache *gpc = &vx->runstate_cache;
    205	uint64_t *user_times;
    206	unsigned long flags;
    207	size_t user_len;
    208	int *user_state;
    209
    210	kvm_xen_update_runstate(v, state);
    211
    212	if (!vx->runstate_cache.active)
    213		return;
    214
    215	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
    216		user_len = sizeof(struct vcpu_runstate_info);
    217	else
    218		user_len = sizeof(struct compat_vcpu_runstate_info);
    219
    220	read_lock_irqsave(&gpc->lock, flags);
    221	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
    222					   user_len)) {
    223		read_unlock_irqrestore(&gpc->lock, flags);
    224
    225		/* When invoked from kvm_sched_out() we cannot sleep */
    226		if (state == RUNSTATE_runnable)
    227			return;
    228
    229		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
    230			return;
    231
    232		read_lock_irqsave(&gpc->lock, flags);
    233	}
    234
    235	/*
    236	 * The only difference between 32-bit and 64-bit versions of the
     237	 * runstate struct is the alignment of uint64_t in 32-bit, which
    238	 * means that the 64-bit version has an additional 4 bytes of
    239	 * padding after the first field 'state'.
    240	 *
    241	 * So we use 'int __user *user_state' to point to the state field,
    242	 * and 'uint64_t __user *user_times' for runstate_entry_time. So
    243	 * the actual array of time[] in each state starts at user_times[1].
    244	 */
    245	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
    246	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
    247	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
    248#ifdef CONFIG_X86_64
    249	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
    250		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
    251	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
    252		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
    253#endif
    254
    255	user_state = gpc->khva;
    256
    257	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
    258		user_times = gpc->khva + offsetof(struct vcpu_runstate_info,
    259						  state_entry_time);
    260	else
    261		user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info,
    262						  state_entry_time);
    263
    264	/*
    265	 * First write the updated state_entry_time at the appropriate
    266	 * location determined by 'offset'.
    267	 */
    268	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
    269		     sizeof(user_times[0]));
    270	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
    271		     sizeof(user_times[0]));
    272
    273	user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE;
    274	smp_wmb();
    275
    276	/*
    277	 * Next, write the new runstate. This is in the *same* place
    278	 * for 32-bit and 64-bit guests, asserted here for paranoia.
    279	 */
    280	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
    281		     offsetof(struct compat_vcpu_runstate_info, state));
    282	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state) !=
    283		     sizeof(vx->current_runstate));
    284	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
    285		     sizeof(vx->current_runstate));
    286
    287	*user_state = vx->current_runstate;
    288
    289	/*
    290	 * Write the actual runstate times immediately after the
    291	 * runstate_entry_time.
    292	 */
    293	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
    294		     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
    295	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
    296		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
    297	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
    298		     sizeof_field(struct compat_vcpu_runstate_info, time));
    299	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
    300		     sizeof(vx->runstate_times));
    301
    302	memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
    303	smp_wmb();
    304
    305	/*
    306	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
    307	 * runstate_entry_time field.
    308	 */
    309	user_times[0] &= ~XEN_RUNSTATE_UPDATE;
    310	smp_wmb();
    311
    312	read_unlock_irqrestore(&gpc->lock, flags);
    313
    314	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
    315}
    316
    317static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
    318{
    319	struct kvm_lapic_irq irq = { };
    320	int r;
    321
    322	irq.dest_id = v->vcpu_id;
    323	irq.vector = v->arch.xen.upcall_vector;
    324	irq.dest_mode = APIC_DEST_PHYSICAL;
    325	irq.shorthand = APIC_DEST_NOSHORT;
    326	irq.delivery_mode = APIC_DM_FIXED;
    327	irq.level = 1;
    328
    329	/* The fast version will always work for physical unicast */
    330	WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
    331}
    332
    333/*
    334 * On event channel delivery, the vcpu_info may not have been accessible.
    335 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
    336 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
    337 * Do so now that we can sleep in the context of the vCPU to bring the
    338 * page in, and refresh the pfn cache for it.
    339 */
    340void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
    341{
    342	unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
    343	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
    344	unsigned long flags;
    345
    346	if (!evtchn_pending_sel)
    347		return;
    348
    349	/*
    350	 * Yes, this is an open-coded loop. But that's just what put_user()
    351	 * does anyway. Page it in and retry the instruction. We're just a
    352	 * little more honest about it.
    353	 */
    354	read_lock_irqsave(&gpc->lock, flags);
    355	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
    356					   sizeof(struct vcpu_info))) {
    357		read_unlock_irqrestore(&gpc->lock, flags);
    358
    359		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
    360						 sizeof(struct vcpu_info)))
    361			return;
    362
    363		read_lock_irqsave(&gpc->lock, flags);
    364	}
    365
    366	/* Now gpc->khva is a valid kernel address for the vcpu_info */
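        	/*
        	 * The LOCK OR below sets the pending bits in the guest's
        	 * evtchn_pending_sel; the NOT + LOCK AND then clears exactly
        	 * those delivered bits from the in-kernel shadow copy.
        	 */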
    367	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
    368		struct vcpu_info *vi = gpc->khva;
    369
    370		asm volatile(LOCK_PREFIX "orq %0, %1\n"
    371			     "notq %0\n"
    372			     LOCK_PREFIX "andq %0, %2\n"
    373			     : "=r" (evtchn_pending_sel),
    374			       "+m" (vi->evtchn_pending_sel),
    375			       "+m" (v->arch.xen.evtchn_pending_sel)
    376			     : "0" (evtchn_pending_sel));
    377		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
    378	} else {
    379		u32 evtchn_pending_sel32 = evtchn_pending_sel;
    380		struct compat_vcpu_info *vi = gpc->khva;
    381
    382		asm volatile(LOCK_PREFIX "orl %0, %1\n"
    383			     "notl %0\n"
    384			     LOCK_PREFIX "andl %0, %2\n"
    385			     : "=r" (evtchn_pending_sel32),
    386			       "+m" (vi->evtchn_pending_sel),
    387			       "+m" (v->arch.xen.evtchn_pending_sel)
    388			     : "0" (evtchn_pending_sel32));
    389		WRITE_ONCE(vi->evtchn_upcall_pending, 1);
    390	}
    391	read_unlock_irqrestore(&gpc->lock, flags);
    392
    393	/* For the per-vCPU lapic vector, deliver it as MSI. */
    394	if (v->arch.xen.upcall_vector)
    395		kvm_xen_inject_vcpu_vector(v);
    396
    397	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
    398}
    399
    400int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
    401{
    402	struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
    403	unsigned long flags;
    404	u8 rc = 0;
    405
    406	/*
    407	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
    408	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
    409	 */
    410
    411	/* No need for compat handling here */
    412	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
    413		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
    414	BUILD_BUG_ON(sizeof(rc) !=
    415		     sizeof_field(struct vcpu_info, evtchn_upcall_pending));
    416	BUILD_BUG_ON(sizeof(rc) !=
    417		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
    418
    419	read_lock_irqsave(&gpc->lock, flags);
    420	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
    421					   sizeof(struct vcpu_info))) {
    422		read_unlock_irqrestore(&gpc->lock, flags);
    423
    424		/*
    425		 * This function gets called from kvm_vcpu_block() after setting the
    426		 * task to TASK_INTERRUPTIBLE, to see if it needs to wake immediately
    427		 * from a HLT. So we really mustn't sleep. If the page ended up absent
    428		 * at that point, just return 1 in order to trigger an immediate wake,
    429		 * and we'll end up getting called again from a context where we *can*
    430		 * fault in the page and wait for it.
    431		 */
    432		if (in_atomic() || !task_is_running(current))
    433			return 1;
    434
    435		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
    436						 sizeof(struct vcpu_info))) {
    437			/*
    438			 * If this failed, userspace has screwed up the
    439			 * vcpu_info mapping. No interrupts for you.
    440			 */
    441			return 0;
    442		}
    443		read_lock_irqsave(&gpc->lock, flags);
    444	}
    445
    446	rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
    447	read_unlock_irqrestore(&gpc->lock, flags);
    448	return rc;
    449}
    450
    451int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
    452{
    453	int r = -ENOENT;
    454
    455
    456	switch (data->type) {
    457	case KVM_XEN_ATTR_TYPE_LONG_MODE:
    458		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
    459			r = -EINVAL;
    460		} else {
    461			mutex_lock(&kvm->lock);
    462			kvm->arch.xen.long_mode = !!data->u.long_mode;
    463			mutex_unlock(&kvm->lock);
    464			r = 0;
    465		}
    466		break;
    467
    468	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
    469		mutex_lock(&kvm->lock);
    470		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
    471		mutex_unlock(&kvm->lock);
    472		break;
    473
    474	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
    475		if (data->u.vector && data->u.vector < 0x10)
    476			r = -EINVAL;
    477		else {
    478			mutex_lock(&kvm->lock);
    479			kvm->arch.xen.upcall_vector = data->u.vector;
    480			mutex_unlock(&kvm->lock);
    481			r = 0;
    482		}
    483		break;
    484
    485	case KVM_XEN_ATTR_TYPE_EVTCHN:
    486		r = kvm_xen_setattr_evtchn(kvm, data);
    487		break;
    488
    489	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
    490		mutex_lock(&kvm->lock);
    491		kvm->arch.xen.xen_version = data->u.xen_version;
    492		mutex_unlock(&kvm->lock);
    493		r = 0;
    494		break;
    495
    496	default:
    497		break;
    498	}
    499
    500	return r;
    501}
    502
    503int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
    504{
    505	int r = -ENOENT;
    506
    507	mutex_lock(&kvm->lock);
    508
    509	switch (data->type) {
    510	case KVM_XEN_ATTR_TYPE_LONG_MODE:
    511		data->u.long_mode = kvm->arch.xen.long_mode;
    512		r = 0;
    513		break;
    514
    515	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
    516		if (kvm->arch.xen.shinfo_cache.active)
    517			data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
    518		else
    519			data->u.shared_info.gfn = GPA_INVALID;
    520		r = 0;
    521		break;
    522
    523	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
    524		data->u.vector = kvm->arch.xen.upcall_vector;
    525		r = 0;
    526		break;
    527
    528	case KVM_XEN_ATTR_TYPE_XEN_VERSION:
    529		data->u.xen_version = kvm->arch.xen.xen_version;
    530		r = 0;
    531		break;
    532
    533	default:
    534		break;
    535	}
    536
    537	mutex_unlock(&kvm->lock);
    538	return r;
    539}
    540
    541int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
    542{
    543	int idx, r = -ENOENT;
    544
    545	mutex_lock(&vcpu->kvm->lock);
    546	idx = srcu_read_lock(&vcpu->kvm->srcu);
    547
    548	switch (data->type) {
    549	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
    550		/* No compat necessary here. */
    551		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
    552			     sizeof(struct compat_vcpu_info));
    553		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
    554			     offsetof(struct compat_vcpu_info, time));
    555
    556		if (data->u.gpa == GPA_INVALID) {
    557			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
    558			r = 0;
    559			break;
    560		}
    561
    562		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
    563					      &vcpu->arch.xen.vcpu_info_cache,
    564					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
    565					      sizeof(struct vcpu_info));
    566		if (!r)
    567			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
    568
    569		break;
    570
    571	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
    572		if (data->u.gpa == GPA_INVALID) {
    573			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
    574						     &vcpu->arch.xen.vcpu_time_info_cache);
    575			r = 0;
    576			break;
    577		}
    578
    579		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
    580					      &vcpu->arch.xen.vcpu_time_info_cache,
    581					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
    582					      sizeof(struct pvclock_vcpu_time_info));
    583		if (!r)
    584			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
    585		break;
    586
    587	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
    588		if (!sched_info_on()) {
    589			r = -EOPNOTSUPP;
    590			break;
    591		}
    592		if (data->u.gpa == GPA_INVALID) {
    593			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
    594						     &vcpu->arch.xen.runstate_cache);
    595			r = 0;
    596			break;
    597		}
    598
    599		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
    600					      &vcpu->arch.xen.runstate_cache,
    601					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
    602					      sizeof(struct vcpu_runstate_info));
    603		break;
    604
    605	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
    606		if (!sched_info_on()) {
    607			r = -EOPNOTSUPP;
    608			break;
    609		}
    610		if (data->u.runstate.state > RUNSTATE_offline) {
    611			r = -EINVAL;
    612			break;
    613		}
    614
    615		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
    616		r = 0;
    617		break;
    618
    619	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
    620		if (!sched_info_on()) {
    621			r = -EOPNOTSUPP;
    622			break;
    623		}
    624		if (data->u.runstate.state > RUNSTATE_offline) {
    625			r = -EINVAL;
    626			break;
    627		}
    628		if (data->u.runstate.state_entry_time !=
    629		    (data->u.runstate.time_running +
    630		     data->u.runstate.time_runnable +
    631		     data->u.runstate.time_blocked +
    632		     data->u.runstate.time_offline)) {
    633			r = -EINVAL;
    634			break;
    635		}
    636		if (get_kvmclock_ns(vcpu->kvm) <
    637		    data->u.runstate.state_entry_time) {
    638			r = -EINVAL;
    639			break;
    640		}
    641
    642		vcpu->arch.xen.current_runstate = data->u.runstate.state;
    643		vcpu->arch.xen.runstate_entry_time =
    644			data->u.runstate.state_entry_time;
    645		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
    646			data->u.runstate.time_running;
    647		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
    648			data->u.runstate.time_runnable;
    649		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
    650			data->u.runstate.time_blocked;
    651		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
    652			data->u.runstate.time_offline;
    653		vcpu->arch.xen.last_steal = current->sched_info.run_delay;
    654		r = 0;
    655		break;
    656
    657	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
    658		if (!sched_info_on()) {
    659			r = -EOPNOTSUPP;
    660			break;
    661		}
    662		if (data->u.runstate.state > RUNSTATE_offline &&
    663		    data->u.runstate.state != (u64)-1) {
    664			r = -EINVAL;
    665			break;
    666		}
    667		/* The adjustment must add up */
    668		if (data->u.runstate.state_entry_time !=
    669		    (data->u.runstate.time_running +
    670		     data->u.runstate.time_runnable +
    671		     data->u.runstate.time_blocked +
    672		     data->u.runstate.time_offline)) {
    673			r = -EINVAL;
    674			break;
    675		}
    676
    677		if (get_kvmclock_ns(vcpu->kvm) <
    678		    (vcpu->arch.xen.runstate_entry_time +
    679		     data->u.runstate.state_entry_time)) {
    680			r = -EINVAL;
    681			break;
    682		}
    683
    684		vcpu->arch.xen.runstate_entry_time +=
    685			data->u.runstate.state_entry_time;
    686		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
    687			data->u.runstate.time_running;
    688		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
    689			data->u.runstate.time_runnable;
    690		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
    691			data->u.runstate.time_blocked;
    692		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
    693			data->u.runstate.time_offline;
    694
    695		if (data->u.runstate.state <= RUNSTATE_offline)
    696			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
    697		r = 0;
    698		break;
    699
    700	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
    701		if (data->u.vcpu_id >= KVM_MAX_VCPUS)
    702			r = -EINVAL;
    703		else {
    704			vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
    705			r = 0;
    706		}
    707		break;
    708
    709	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
    710		if (data->u.timer.port) {
    711			if (data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
    712				r = -EINVAL;
    713				break;
    714			}
    715			vcpu->arch.xen.timer_virq = data->u.timer.port;
    716			kvm_xen_init_timer(vcpu);
    717
    718			/* Restart the timer if it's set */
    719			if (data->u.timer.expires_ns)
    720				kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
    721						    data->u.timer.expires_ns -
    722						    get_kvmclock_ns(vcpu->kvm));
    723		} else if (kvm_xen_timer_enabled(vcpu)) {
    724			kvm_xen_stop_timer(vcpu);
    725			vcpu->arch.xen.timer_virq = 0;
    726		}
    727
    728		r = 0;
    729		break;
    730
    731	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
    732		if (data->u.vector && data->u.vector < 0x10)
    733			r = -EINVAL;
    734		else {
    735			vcpu->arch.xen.upcall_vector = data->u.vector;
    736			r = 0;
    737		}
    738		break;
    739
    740	default:
    741		break;
    742	}
    743
    744	srcu_read_unlock(&vcpu->kvm->srcu, idx);
    745	mutex_unlock(&vcpu->kvm->lock);
    746	return r;
    747}
    748
    749int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
    750{
    751	int r = -ENOENT;
    752
    753	mutex_lock(&vcpu->kvm->lock);
    754
    755	switch (data->type) {
    756	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
    757		if (vcpu->arch.xen.vcpu_info_cache.active)
    758			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
    759		else
    760			data->u.gpa = GPA_INVALID;
    761		r = 0;
    762		break;
    763
    764	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
    765		if (vcpu->arch.xen.vcpu_time_info_cache.active)
    766			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
    767		else
    768			data->u.gpa = GPA_INVALID;
    769		r = 0;
    770		break;
    771
    772	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
    773		if (!sched_info_on()) {
    774			r = -EOPNOTSUPP;
    775			break;
    776		}
    777		if (vcpu->arch.xen.runstate_cache.active) {
    778			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
    779			r = 0;
    780		}
    781		break;
    782
    783	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
    784		if (!sched_info_on()) {
    785			r = -EOPNOTSUPP;
    786			break;
    787		}
    788		data->u.runstate.state = vcpu->arch.xen.current_runstate;
    789		r = 0;
    790		break;
    791
    792	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
    793		if (!sched_info_on()) {
    794			r = -EOPNOTSUPP;
    795			break;
    796		}
    797		data->u.runstate.state = vcpu->arch.xen.current_runstate;
    798		data->u.runstate.state_entry_time =
    799			vcpu->arch.xen.runstate_entry_time;
    800		data->u.runstate.time_running =
    801			vcpu->arch.xen.runstate_times[RUNSTATE_running];
    802		data->u.runstate.time_runnable =
    803			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
    804		data->u.runstate.time_blocked =
    805			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
    806		data->u.runstate.time_offline =
    807			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
    808		r = 0;
    809		break;
    810
    811	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
    812		r = -EINVAL;
    813		break;
    814
    815	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID:
    816		data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
    817		r = 0;
    818		break;
    819
    820	case KVM_XEN_VCPU_ATTR_TYPE_TIMER:
    821		data->u.timer.port = vcpu->arch.xen.timer_virq;
    822		data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
    823		data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
    824		r = 0;
    825		break;
    826
    827	case KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR:
    828		data->u.vector = vcpu->arch.xen.upcall_vector;
    829		r = 0;
    830		break;
    831
    832	default:
    833		break;
    834	}
    835
    836	mutex_unlock(&vcpu->kvm->lock);
    837	return r;
    838}
    839
    840int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
    841{
    842	struct kvm *kvm = vcpu->kvm;
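        	/*
        	 * The value written to the hypercall MSR encodes the target GPA
        	 * in the page-aligned bits and the blob page index in the low
        	 * bits.
        	 */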
    843	u32 page_num = data & ~PAGE_MASK;
    844	u64 page_addr = data & PAGE_MASK;
    845	bool lm = is_long_mode(vcpu);
    846
    847	/* Latch long_mode for shared_info pages etc. */
    848	vcpu->kvm->arch.xen.long_mode = lm;
    849
    850	/*
    851	 * If Xen hypercall intercept is enabled, fill the hypercall
    852	 * page with VMCALL/VMMCALL instructions since that's what
    853	 * we catch. Else the VMM has provided the hypercall pages
    854	 * with instructions of its own choosing, so use those.
    855	 */
    856	if (kvm_xen_hypercall_enabled(kvm)) {
    857		u8 instructions[32];
    858		int i;
    859
    860		if (page_num)
    861			return 1;
    862
    863		/* mov imm32, %eax */
    864		instructions[0] = 0xb8;
    865
    866		/* vmcall / vmmcall */
    867		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + 5);
    868
    869		/* ret */
    870		instructions[8] = 0xc3;
    871
    872		/* int3 to pad */
    873		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);
    874
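        		/*
        		 * Each 32-byte slot N becomes "mov $N, %eax; vmcall/vmmcall;
        		 * ret", so a call into slot N issues hypercall N.
        		 */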
    875		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
    876			*(u32 *)&instructions[1] = i;
    877			if (kvm_vcpu_write_guest(vcpu,
    878						 page_addr + (i * sizeof(instructions)),
    879						 instructions, sizeof(instructions)))
    880				return 1;
    881		}
    882	} else {
    883		/*
    884		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
    885		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
    886		 */
    887		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
    888				     : kvm->arch.xen_hvm_config.blob_addr_32;
    889		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
    890				  : kvm->arch.xen_hvm_config.blob_size_32;
    891		u8 *page;
    892
    893		if (page_num >= blob_size)
    894			return 1;
    895
    896		blob_addr += page_num * PAGE_SIZE;
    897
    898		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
    899		if (IS_ERR(page))
    900			return PTR_ERR(page);
    901
    902		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
    903			kfree(page);
    904			return 1;
    905		}
    906	}
    907	return 0;
    908}
    909
    910int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
    911{
    912	/* Only some feature flags need to be *enabled* by userspace */
    913	u32 permitted_flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
    914		KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
    915
    916	if (xhc->flags & ~permitted_flags)
    917		return -EINVAL;
    918
    919	/*
    920	 * With hypercall interception the kernel generates its own
    921	 * hypercall page so it must not be provided.
    922	 */
    923	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
    924	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
    925	     xhc->blob_size_32 || xhc->blob_size_64))
    926		return -EINVAL;
    927
    928	mutex_lock(&kvm->lock);
    929
    930	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
    931		static_branch_inc(&kvm_xen_enabled.key);
    932	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
    933		static_branch_slow_dec_deferred(&kvm_xen_enabled);
    934
    935	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
    936
    937	mutex_unlock(&kvm->lock);
    938	return 0;
    939}
    940
    941static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
    942{
    943	kvm_rax_write(vcpu, result);
    944	return kvm_skip_emulated_instruction(vcpu);
    945}
    946
    947static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
    948{
    949	struct kvm_run *run = vcpu->run;
    950
    951	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
    952		return 1;
    953
    954	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
    955}
    956
    957static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
    958			       evtchn_port_t *ports)
    959{
    960	struct kvm *kvm = vcpu->kvm;
    961	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
    962	unsigned long *pending_bits;
    963	unsigned long flags;
    964	bool ret = true;
    965	int idx, i;
    966
    967	read_lock_irqsave(&gpc->lock, flags);
    968	idx = srcu_read_lock(&kvm->srcu);
    969	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
    970		goto out_rcu;
    971
    972	ret = false;
    973	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
    974		struct shared_info *shinfo = gpc->khva;
    975		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
    976	} else {
    977		struct compat_shared_info *shinfo = gpc->khva;
    978		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
    979	}
    980
    981	for (i = 0; i < nr_ports; i++) {
    982		if (test_bit(ports[i], pending_bits)) {
    983			ret = true;
    984			break;
    985		}
    986	}
    987
    988 out_rcu:
    989	srcu_read_unlock(&kvm->srcu, idx);
    990	read_unlock_irqrestore(&gpc->lock, flags);
    991
    992	return ret;
    993}
    994
    995static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
    996				 u64 param, u64 *r)
    997{
    998	int idx, i;
    999	struct sched_poll sched_poll;
   1000	evtchn_port_t port, *ports;
   1001	gpa_t gpa;
   1002
   1003	if (!longmode || !lapic_in_kernel(vcpu) ||
   1004	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
   1005		return false;
   1006
   1007	idx = srcu_read_lock(&vcpu->kvm->srcu);
   1008	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
   1009	srcu_read_unlock(&vcpu->kvm->srcu, idx);
   1010
   1011	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
   1012					sizeof(sched_poll))) {
   1013		*r = -EFAULT;
   1014		return true;
   1015	}
   1016
   1017	if (unlikely(sched_poll.nr_ports > 1)) {
   1018		/* Xen (unofficially) limits number of pollers to 128 */
   1019		if (sched_poll.nr_ports > 128) {
   1020			*r = -EINVAL;
   1021			return true;
   1022		}
   1023
   1024		ports = kmalloc_array(sched_poll.nr_ports,
   1025				      sizeof(*ports), GFP_KERNEL);
   1026		if (!ports) {
   1027			*r = -ENOMEM;
   1028			return true;
   1029		}
   1030	} else
   1031		ports = &port;
   1032
   1033	for (i = 0; i < sched_poll.nr_ports; i++) {
   1034		idx = srcu_read_lock(&vcpu->kvm->srcu);
   1035		gpa = kvm_mmu_gva_to_gpa_system(vcpu,
   1036						(gva_t)(sched_poll.ports + i),
   1037						NULL);
   1038		srcu_read_unlock(&vcpu->kvm->srcu, idx);
   1039
   1040		if (!gpa || kvm_vcpu_read_guest(vcpu, gpa,
   1041						&ports[i], sizeof(port))) {
   1042			*r = -EFAULT;
   1043			goto out;
   1044		}
   1045	}
   1046
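        	/*
        	 * Record what is being polled on: a single port, or -1 as a
        	 * wildcard when polling several ports (see kvm_xen_check_poller()).
        	 */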
   1047	if (sched_poll.nr_ports == 1)
   1048		vcpu->arch.xen.poll_evtchn = port;
   1049	else
   1050		vcpu->arch.xen.poll_evtchn = -1;
   1051
   1052	set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.xen.poll_mask);
   1053
   1054	if (!wait_pending_event(vcpu, sched_poll.nr_ports, ports)) {
   1055		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
   1056
   1057		if (sched_poll.timeout)
   1058			mod_timer(&vcpu->arch.xen.poll_timer,
   1059				  jiffies + nsecs_to_jiffies(sched_poll.timeout));
   1060
   1061		kvm_vcpu_halt(vcpu);
   1062
   1063		if (sched_poll.timeout)
   1064			del_timer(&vcpu->arch.xen.poll_timer);
   1065
   1066		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
   1067		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
   1068	}
   1069
   1070	vcpu->arch.xen.poll_evtchn = 0;
   1071	*r = 0;
   1072out:
   1073	/* Really, this is only needed in case of timeout */
   1074	clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.xen.poll_mask);
   1075
   1076	if (unlikely(sched_poll.nr_ports > 1))
   1077		kfree(ports);
   1078	return true;
   1079}
   1080
   1081static void cancel_evtchn_poll(struct timer_list *t)
   1082{
   1083	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.xen.poll_timer);
   1084
   1085	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
   1086	kvm_vcpu_kick(vcpu);
   1087}
   1088
   1089static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
   1090				   int cmd, u64 param, u64 *r)
   1091{
   1092	switch (cmd) {
   1093	case SCHEDOP_poll:
   1094		if (kvm_xen_schedop_poll(vcpu, longmode, param, r))
   1095			return true;
   1096		fallthrough;
   1097	case SCHEDOP_yield:
   1098		kvm_vcpu_on_spin(vcpu, true);
   1099		*r = 0;
   1100		return true;
   1101	default:
   1102		break;
   1103	}
   1104
   1105	return false;
   1106}
   1107
   1108struct compat_vcpu_set_singleshot_timer {
   1109    uint64_t timeout_abs_ns;
   1110    uint32_t flags;
   1111} __attribute__((packed));
   1112
   1113static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
   1114				  int vcpu_id, u64 param, u64 *r)
   1115{
   1116	struct vcpu_set_singleshot_timer oneshot;
   1117	s64 delta;
   1118	gpa_t gpa;
   1119	int idx;
   1120
   1121	if (!kvm_xen_timer_enabled(vcpu))
   1122		return false;
   1123
   1124	switch (cmd) {
   1125	case VCPUOP_set_singleshot_timer:
   1126		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
   1127			*r = -EINVAL;
   1128			return true;
   1129		}
   1130		idx = srcu_read_lock(&vcpu->kvm->srcu);
   1131		gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
   1132		srcu_read_unlock(&vcpu->kvm->srcu, idx);
   1133
   1134		/*
   1135		 * The only difference for 32-bit compat is the 4 bytes of
   1136		 * padding after the interesting part of the structure. So
   1137		 * for a faithful emulation of Xen we have to *try* to copy
   1138		 * the padding and return -EFAULT if we can't. Otherwise we
   1139		 * might as well just have copied the 12-byte 32-bit struct.
   1140		 */
   1141		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
   1142			     offsetof(struct vcpu_set_singleshot_timer, timeout_abs_ns));
   1143		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, timeout_abs_ns) !=
   1144			     sizeof_field(struct vcpu_set_singleshot_timer, timeout_abs_ns));
   1145		BUILD_BUG_ON(offsetof(struct compat_vcpu_set_singleshot_timer, flags) !=
   1146			     offsetof(struct vcpu_set_singleshot_timer, flags));
   1147		BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
   1148			     sizeof_field(struct vcpu_set_singleshot_timer, flags));
   1149
   1150		if (!gpa ||
   1151		    kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
   1152					sizeof(struct compat_vcpu_set_singleshot_timer))) {
   1153			*r = -EFAULT;
   1154			return true;
   1155		}
   1156
   1157		delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
   1158		if ((oneshot.flags & VCPU_SSHOTTMR_future) && delta < 0) {
   1159			*r = -ETIME;
   1160			return true;
   1161		}
   1162
   1163		kvm_xen_start_timer(vcpu, oneshot.timeout_abs_ns, delta);
   1164		*r = 0;
   1165		return true;
   1166
   1167	case VCPUOP_stop_singleshot_timer:
   1168		if (vcpu->arch.xen.vcpu_id != vcpu_id) {
   1169			*r = -EINVAL;
   1170			return true;
   1171		}
   1172		kvm_xen_stop_timer(vcpu);
   1173		*r = 0;
   1174		return true;
   1175	}
   1176
   1177	return false;
   1178}
   1179
   1180static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
   1181				       u64 *r)
   1182{
   1183	if (!kvm_xen_timer_enabled(vcpu))
   1184		return false;
   1185
   1186	if (timeout) {
   1187		uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
   1188		int64_t delta = timeout - guest_now;
   1189
   1190		/* Xen has a 'Linux workaround' in do_set_timer_op() which
   1191		 * checks for negative absolute timeout values (caused by
   1192		 * integer overflow), and for values about 13 days in the
   1193		 * future (2^50ns) which would be caused by jiffies
   1194		 * overflow. For those cases, it sets the timeout 100ms in
   1195		 * the future (not *too* soon, since if a guest really did
   1196		 * set a long timeout on purpose we don't want to keep
   1197		 * churning CPU time by waking it up).
   1198		 */
   1199		if (unlikely((int64_t)timeout < 0 ||
   1200			     (delta > 0 && (uint32_t) (delta >> 50) != 0))) {
   1201			delta = 100 * NSEC_PER_MSEC;
   1202			timeout = guest_now + delta;
   1203		}
   1204
   1205		kvm_xen_start_timer(vcpu, timeout, delta);
   1206	} else {
   1207		kvm_xen_stop_timer(vcpu);
   1208	}
   1209
   1210	*r = 0;
   1211	return true;
   1212}
   1213
   1214int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
   1215{
   1216	bool longmode;
   1217	u64 input, params[6], r = -ENOSYS;
   1218	bool handled = false;
   1219
   1220	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);
   1221
   1222	/* Hyper-V hypercalls get bit 31 set in EAX */
   1223	if ((input & 0x80000000) &&
   1224	    kvm_hv_hypercall_enabled(vcpu))
   1225		return kvm_hv_hypercall(vcpu);
   1226
   1227	longmode = is_64_bit_hypercall(vcpu);
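        	/*
        	 * Per the Xen hypercall ABI, 32-bit guests pass arguments in
        	 * %ebx/%ecx/%edx/%esi/%edi/%ebp and 64-bit guests in
        	 * %rdi/%rsi/%rdx/%r10/%r8/%r9.
        	 */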
   1228	if (!longmode) {
   1229		params[0] = (u32)kvm_rbx_read(vcpu);
   1230		params[1] = (u32)kvm_rcx_read(vcpu);
   1231		params[2] = (u32)kvm_rdx_read(vcpu);
   1232		params[3] = (u32)kvm_rsi_read(vcpu);
   1233		params[4] = (u32)kvm_rdi_read(vcpu);
   1234		params[5] = (u32)kvm_rbp_read(vcpu);
   1235	}
   1236#ifdef CONFIG_X86_64
   1237	else {
   1238		params[0] = (u64)kvm_rdi_read(vcpu);
   1239		params[1] = (u64)kvm_rsi_read(vcpu);
   1240		params[2] = (u64)kvm_rdx_read(vcpu);
   1241		params[3] = (u64)kvm_r10_read(vcpu);
   1242		params[4] = (u64)kvm_r8_read(vcpu);
   1243		params[5] = (u64)kvm_r9_read(vcpu);
   1244	}
   1245#endif
   1246	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
   1247				params[3], params[4], params[5]);
   1248
   1249	switch (input) {
   1250	case __HYPERVISOR_xen_version:
   1251		if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
   1252			r = vcpu->kvm->arch.xen.xen_version;
   1253			handled = true;
   1254		}
   1255		break;
   1256	case __HYPERVISOR_event_channel_op:
   1257		if (params[0] == EVTCHNOP_send)
   1258			handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
   1259		break;
   1260	case __HYPERVISOR_sched_op:
   1261		handled = kvm_xen_hcall_sched_op(vcpu, longmode, params[0],
   1262						 params[1], &r);
   1263		break;
   1264	case __HYPERVISOR_vcpu_op:
   1265		handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
   1266						params[2], &r);
   1267		break;
   1268	case __HYPERVISOR_set_timer_op: {
   1269		u64 timeout = params[0];
   1270		/* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
   1271		if (!longmode)
   1272			timeout |= params[1] << 32;
   1273		handled = kvm_xen_hcall_set_timer_op(vcpu, timeout, &r);
   1274		break;
   1275	}
   1276	default:
   1277		break;
   1278	}
   1279
   1280	if (handled)
   1281		return kvm_xen_hypercall_set_result(vcpu, r);
   1282
   1283	vcpu->run->exit_reason = KVM_EXIT_XEN;
   1284	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
   1285	vcpu->run->xen.u.hcall.longmode = longmode;
   1286	vcpu->run->xen.u.hcall.cpl = static_call(kvm_x86_get_cpl)(vcpu);
   1287	vcpu->run->xen.u.hcall.input = input;
   1288	vcpu->run->xen.u.hcall.params[0] = params[0];
   1289	vcpu->run->xen.u.hcall.params[1] = params[1];
   1290	vcpu->run->xen.u.hcall.params[2] = params[2];
   1291	vcpu->run->xen.u.hcall.params[3] = params[3];
   1292	vcpu->run->xen.u.hcall.params[4] = params[4];
   1293	vcpu->run->xen.u.hcall.params[5] = params[5];
   1294	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
   1295	vcpu->arch.complete_userspace_io =
   1296		kvm_xen_hypercall_complete_userspace;
   1297
   1298	return 0;
   1299}
   1300
   1301static inline int max_evtchn_port(struct kvm *kvm)
   1302{
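        	/*
        	 * With the 2-level event channel ABI, the port limit is the
        	 * square of the guest word size in bits: 64 * 64 = 4096 ports
        	 * for 64-bit guests, 32 * 32 = 1024 for 32-bit guests.
        	 */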
   1303	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
   1304		return EVTCHN_2L_NR_CHANNELS;
   1305	else
   1306		return COMPAT_EVTCHN_2L_NR_CHANNELS;
   1307}
   1308
   1309static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
   1310{
   1311	int poll_evtchn = vcpu->arch.xen.poll_evtchn;
   1312
   1313	if ((poll_evtchn == port || poll_evtchn == -1) &&
   1314	    test_and_clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.xen.poll_mask)) {
   1315		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
   1316		kvm_vcpu_kick(vcpu);
   1317	}
   1318}
   1319
   1320/*
   1321 * The return value from this function is propagated to kvm_set_irq() API,
   1322 * so it returns:
   1323 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
   1324 *  = 0   Interrupt was coalesced (previous irq is still pending)
   1325 *  > 0   Number of CPUs interrupt was delivered to
   1326 *
   1327 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
    1328 * only check on its return value is a comparison with -EWOULDBLOCK.
   1329 */
   1330int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
   1331{
   1332	struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
   1333	struct kvm_vcpu *vcpu;
   1334	unsigned long *pending_bits, *mask_bits;
   1335	unsigned long flags;
   1336	int port_word_bit;
   1337	bool kick_vcpu = false;
   1338	int vcpu_idx, idx, rc;
   1339
   1340	vcpu_idx = READ_ONCE(xe->vcpu_idx);
   1341	if (vcpu_idx >= 0)
   1342		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
   1343	else {
   1344		vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
   1345		if (!vcpu)
   1346			return -EINVAL;
   1347		WRITE_ONCE(xe->vcpu_idx, kvm_vcpu_get_idx(vcpu));
   1348	}
   1349
   1350	if (!vcpu->arch.xen.vcpu_info_cache.active)
   1351		return -EINVAL;
   1352
   1353	if (xe->port >= max_evtchn_port(kvm))
   1354		return -EINVAL;
   1355
   1356	rc = -EWOULDBLOCK;
   1357
   1358	idx = srcu_read_lock(&kvm->srcu);
   1359
   1360	read_lock_irqsave(&gpc->lock, flags);
   1361	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
   1362		goto out_rcu;
   1363
   1364	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
   1365		struct shared_info *shinfo = gpc->khva;
   1366		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
   1367		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
   1368		port_word_bit = xe->port / 64;
   1369	} else {
   1370		struct compat_shared_info *shinfo = gpc->khva;
   1371		pending_bits = (unsigned long *)&shinfo->evtchn_pending;
   1372		mask_bits = (unsigned long *)&shinfo->evtchn_mask;
   1373		port_word_bit = xe->port / 32;
   1374	}
   1375
   1376	/*
   1377	 * If this port wasn't already set, and if it isn't masked, then
   1378	 * we try to set the corresponding bit in the in-kernel shadow of
   1379	 * evtchn_pending_sel for the target vCPU. And if *that* wasn't
   1380	 * already set, then we kick the vCPU in question to write to the
   1381	 * *real* evtchn_pending_sel in its own guest vcpu_info struct.
   1382	 */
   1383	if (test_and_set_bit(xe->port, pending_bits)) {
   1384		rc = 0; /* It was already raised */
   1385	} else if (test_bit(xe->port, mask_bits)) {
   1386		rc = -ENOTCONN; /* Masked */
   1387		kvm_xen_check_poller(vcpu, xe->port);
   1388	} else {
   1389		rc = 1; /* Delivered to the bitmap in shared_info. */
   1390		/* Now switch to the vCPU's vcpu_info to set the index and pending_sel */
   1391		read_unlock_irqrestore(&gpc->lock, flags);
   1392		gpc = &vcpu->arch.xen.vcpu_info_cache;
   1393
   1394		read_lock_irqsave(&gpc->lock, flags);
   1395		if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
   1396			/*
   1397			 * Could not access the vcpu_info. Set the bit in-kernel
   1398			 * and prod the vCPU to deliver it for itself.
   1399			 */
   1400			if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
   1401				kick_vcpu = true;
   1402			goto out_rcu;
   1403		}
   1404
   1405		if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
   1406			struct vcpu_info *vcpu_info = gpc->khva;
   1407			if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
   1408				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
   1409				kick_vcpu = true;
   1410			}
   1411		} else {
   1412			struct compat_vcpu_info *vcpu_info = gpc->khva;
   1413			if (!test_and_set_bit(port_word_bit,
   1414					      (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
   1415				WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
   1416				kick_vcpu = true;
   1417			}
   1418		}
   1419
   1420		/* For the per-vCPU lapic vector, deliver it as MSI. */
   1421		if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
   1422			kvm_xen_inject_vcpu_vector(vcpu);
   1423			kick_vcpu = false;
   1424		}
   1425	}
   1426
   1427 out_rcu:
   1428	read_unlock_irqrestore(&gpc->lock, flags);
   1429	srcu_read_unlock(&kvm->srcu, idx);
   1430
   1431	if (kick_vcpu) {
   1432		kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
   1433		kvm_vcpu_kick(vcpu);
   1434	}
   1435
   1436	return rc;
   1437}
   1438
   1439static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
   1440{
   1441	bool mm_borrowed = false;
   1442	int rc;
   1443
   1444	rc = kvm_xen_set_evtchn_fast(xe, kvm);
   1445	if (rc != -EWOULDBLOCK)
   1446		return rc;
   1447
   1448	if (current->mm != kvm->mm) {
   1449		/*
   1450		 * If not on a thread which already belongs to this KVM,
   1451		 * we'd better be in the irqfd workqueue.
   1452		 */
   1453		if (WARN_ON_ONCE(current->mm))
   1454			return -EINVAL;
   1455
   1456		kthread_use_mm(kvm->mm);
   1457		mm_borrowed = true;
   1458	}
   1459
   1460	/*
   1461	 * For the irqfd workqueue, using the main kvm->lock mutex is
   1462	 * fine since this function is invoked from kvm_set_irq() with
   1463	 * no other lock held, no srcu. In future if it will be called
   1464	 * directly from a vCPU thread (e.g. on hypercall for an IPI)
   1465	 * then it may need to switch to using a leaf-node mutex for
   1466	 * serializing the shared_info mapping.
   1467	 */
   1468	mutex_lock(&kvm->lock);
   1469
   1470	/*
   1471	 * It is theoretically possible for the page to be unmapped
   1472	 * and the MMU notifier to invalidate the shared_info before
   1473	 * we even get to use it. In that case, this looks like an
   1474	 * infinite loop. It was tempting to do it via the userspace
   1475	 * HVA instead... but that just *hides* the fact that it's
   1476	 * an infinite loop, because if a fault occurs and it waits
   1477	 * for the page to come back, it can *still* immediately
   1478	 * fault and have to wait again, repeatedly.
   1479	 *
   1480	 * Conversely, the page could also have been reinstated by
   1481	 * another thread before we even obtain the mutex above, so
   1482	 * check again *first* before remapping it.
   1483	 */
   1484	do {
   1485		struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
   1486		int idx;
   1487
   1488		rc = kvm_xen_set_evtchn_fast(xe, kvm);
   1489		if (rc != -EWOULDBLOCK)
   1490			break;
   1491
   1492		idx = srcu_read_lock(&kvm->srcu);
   1493		rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
   1494		srcu_read_unlock(&kvm->srcu, idx);
    1495	} while (!rc);
   1496
   1497	mutex_unlock(&kvm->lock);
   1498
   1499	if (mm_borrowed)
   1500		kthread_unuse_mm(kvm->mm);
   1501
   1502	return rc;
   1503}
   1504
   1505/* This is the version called from kvm_set_irq() as the .set function */
   1506static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
   1507			 int irq_source_id, int level, bool line_status)
   1508{
   1509	if (!level)
   1510		return -EINVAL;
   1511
   1512	return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
   1513}
   1514
   1515/*
   1516 * Set up an event channel interrupt from the KVM IRQ routing table.
   1517 * Used for e.g. PIRQ from passed through physical devices.
   1518 */
   1519int kvm_xen_setup_evtchn(struct kvm *kvm,
   1520			 struct kvm_kernel_irq_routing_entry *e,
   1521			 const struct kvm_irq_routing_entry *ue)
   1522
   1523{
   1524	struct kvm_vcpu *vcpu;
   1525
   1526	if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
   1527		return -EINVAL;
   1528
   1529	/* We only support 2 level event channels for now */
   1530	if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
   1531		return -EINVAL;
   1532
   1533	/*
   1534	 * Xen gives us interesting mappings from vCPU index to APIC ID,
   1535	 * which means kvm_get_vcpu_by_id() has to iterate over all vCPUs
   1536	 * to find it. Do that once at setup time, instead of every time.
   1537	 * But beware that on live update / live migration, the routing
   1538	 * table might be reinstated before the vCPU threads have finished
   1539	 * recreating their vCPUs.
   1540	 */
   1541	vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
   1542	if (vcpu)
   1543		e->xen_evtchn.vcpu_idx = kvm_vcpu_get_idx(vcpu);
   1544	else
   1545		e->xen_evtchn.vcpu_idx = -1;
   1546
   1547	e->xen_evtchn.port = ue->u.xen_evtchn.port;
   1548	e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
   1549	e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
   1550	e->set = evtchn_set_fn;
   1551
   1552	return 0;
   1553}
   1554
   1555/*
   1556 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
   1557 */
   1558int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
   1559{
   1560	struct kvm_xen_evtchn e;
   1561	int ret;
   1562
   1563	if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
   1564		return -EINVAL;
   1565
   1566	/* We only support 2 level event channels for now */
   1567	if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
   1568		return -EINVAL;
   1569
   1570	e.port = uxe->port;
   1571	e.vcpu_id = uxe->vcpu;
   1572	e.vcpu_idx = -1;
   1573	e.priority = uxe->priority;
   1574
   1575	ret = kvm_xen_set_evtchn(&e, kvm);
   1576
   1577	/*
   1578	 * None of that 'return 1 if it actually got delivered' nonsense.
   1579	 * We don't care if it was masked (-ENOTCONN) either.
   1580	 */
   1581	if (ret > 0 || ret == -ENOTCONN)
   1582		ret = 0;
   1583
   1584	return ret;
   1585}
   1586
   1587/*
   1588 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
   1589 */
   1590struct evtchnfd {
   1591	u32 send_port;
   1592	u32 type;
   1593	union {
   1594		struct kvm_xen_evtchn port;
   1595		struct {
   1596			u32 port; /* zero */
   1597			struct eventfd_ctx *ctx;
   1598		} eventfd;
   1599	} deliver;
   1600};
   1601
   1602/*
   1603 * Update target vCPU or priority for a registered sending channel.
   1604 */
   1605static int kvm_xen_eventfd_update(struct kvm *kvm,
   1606				  struct kvm_xen_hvm_attr *data)
   1607{
   1608	u32 port = data->u.evtchn.send_port;
   1609	struct evtchnfd *evtchnfd;
   1610
   1611	if (!port || port >= max_evtchn_port(kvm))
   1612		return -EINVAL;
   1613
   1614	mutex_lock(&kvm->lock);
   1615	evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
   1616	mutex_unlock(&kvm->lock);
   1617
   1618	if (!evtchnfd)
   1619		return -ENOENT;
   1620
   1621	/* For an UPDATE, nothing may change except the priority/vcpu */
   1622	if (evtchnfd->type != data->u.evtchn.type)
   1623		return -EINVAL;
   1624
   1625	/*
   1626	 * Port cannot change, and if it's zero that was an eventfd
   1627	 * which can't be changed either.
   1628	 */
   1629	if (!evtchnfd->deliver.port.port ||
   1630	    evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
   1631		return -EINVAL;
   1632
   1633	/* We only support 2 level event channels for now */
   1634	if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
   1635		return -EINVAL;
   1636
   1637	mutex_lock(&kvm->lock);
   1638	evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
   1639	if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
   1640		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
   1641		evtchnfd->deliver.port.vcpu_idx = -1;
   1642	}
   1643	mutex_unlock(&kvm->lock);
   1644	return 0;
   1645}
   1646
   1647/*
   1648 * Configure the target (eventfd or local port delivery) for sending on
   1649 * a given event channel.
   1650 */
   1651static int kvm_xen_eventfd_assign(struct kvm *kvm,
   1652				  struct kvm_xen_hvm_attr *data)
   1653{
   1654	u32 port = data->u.evtchn.send_port;
   1655	struct eventfd_ctx *eventfd = NULL;
   1656	struct evtchnfd *evtchnfd = NULL;
   1657	int ret = -EINVAL;
   1658
   1659	if (!port || port >= max_evtchn_port(kvm))
   1660		return -EINVAL;
   1661
   1662	evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
   1663	if (!evtchnfd)
   1664		return -ENOMEM;
   1665
   1666	switch(data->u.evtchn.type) {
   1667	case EVTCHNSTAT_ipi:
   1668		/* IPI  must map back to the same port# */
   1669		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
   1670			goto out; /* -EINVAL */
   1671		break;
   1672
   1673	case EVTCHNSTAT_interdomain:
   1674		if (data->u.evtchn.deliver.port.port) {
   1675			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
   1676				goto out; /* -EINVAL */
   1677		} else {
   1678			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
   1679			if (IS_ERR(eventfd)) {
   1680				ret = PTR_ERR(eventfd);
   1681				goto out;
   1682			}
   1683		}
   1684		break;
   1685
   1686	case EVTCHNSTAT_virq:
   1687	case EVTCHNSTAT_closed:
   1688	case EVTCHNSTAT_unbound:
   1689	case EVTCHNSTAT_pirq:
   1690	default: /* Unknown event channel type */
   1691		goto out; /* -EINVAL */
   1692	}
   1693
   1694	evtchnfd->send_port = data->u.evtchn.send_port;
   1695	evtchnfd->type = data->u.evtchn.type;
   1696	if (eventfd) {
   1697		evtchnfd->deliver.eventfd.ctx = eventfd;
   1698	} else {
   1699		/* We only support 2 level event channels for now */
   1700		if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
   1701			goto out; /* -EINVAL; */
   1702
   1703		evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
   1704		evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
   1705		evtchnfd->deliver.port.vcpu_idx = -1;
   1706		evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
   1707	}
   1708
   1709	mutex_lock(&kvm->lock);
   1710	ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
   1711			GFP_KERNEL);
   1712	mutex_unlock(&kvm->lock);
   1713	if (ret >= 0)
   1714		return 0;
   1715
   1716	if (ret == -ENOSPC)
   1717		ret = -EEXIST;
   1718out:
   1719	if (eventfd)
   1720		eventfd_ctx_put(eventfd);
   1721	kfree(evtchnfd);
   1722	return ret;
   1723}
   1724
   1725static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
   1726{
   1727	struct evtchnfd *evtchnfd;
   1728
   1729	mutex_lock(&kvm->lock);
   1730	evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
   1731	mutex_unlock(&kvm->lock);
   1732
   1733	if (!evtchnfd)
   1734		return -ENOENT;
   1735
   1736	if (kvm)
   1737		synchronize_srcu(&kvm->srcu);
   1738	if (!evtchnfd->deliver.port.port)
   1739		eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
   1740	kfree(evtchnfd);
   1741	return 0;
   1742}
   1743
   1744static int kvm_xen_eventfd_reset(struct kvm *kvm)
   1745{
   1746	struct evtchnfd *evtchnfd;
   1747	int i;
   1748
   1749	mutex_lock(&kvm->lock);
   1750	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
   1751		idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
   1752		synchronize_srcu(&kvm->srcu);
   1753		if (!evtchnfd->deliver.port.port)
   1754			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
   1755		kfree(evtchnfd);
   1756	}
   1757	mutex_unlock(&kvm->lock);
   1758
   1759	return 0;
   1760}
   1761
   1762static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
   1763{
   1764	u32 port = data->u.evtchn.send_port;
   1765
   1766	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
   1767		return kvm_xen_eventfd_reset(kvm);
   1768
   1769	if (!port || port >= max_evtchn_port(kvm))
   1770		return -EINVAL;
   1771
   1772	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
   1773		return kvm_xen_eventfd_deassign(kvm, port);
   1774	if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
   1775		return kvm_xen_eventfd_update(kvm, data);
   1776	if (data->u.evtchn.flags)
   1777		return -EINVAL;
   1778
   1779	return kvm_xen_eventfd_assign(kvm, data);
   1780}
   1781
   1782static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
   1783{
   1784	struct evtchnfd *evtchnfd;
   1785	struct evtchn_send send;
   1786	gpa_t gpa;
   1787	int idx;
   1788
   1789	idx = srcu_read_lock(&vcpu->kvm->srcu);
   1790	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
   1791	srcu_read_unlock(&vcpu->kvm->srcu, idx);
   1792
   1793	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
   1794		*r = -EFAULT;
   1795		return true;
   1796	}
   1797
   1798	/* The evtchn_ports idr is protected by vcpu->kvm->srcu */
   1799	evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
   1800	if (!evtchnfd)
   1801		return false;
   1802
   1803	if (evtchnfd->deliver.port.port) {
   1804		int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
   1805		if (ret < 0 && ret != -ENOTCONN)
   1806			return false;
   1807	} else {
   1808		eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
   1809	}
   1810
   1811	*r = 0;
   1812	return true;
   1813}
   1814
   1815void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
   1816{
   1817	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
   1818	vcpu->arch.xen.poll_evtchn = 0;
   1819	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
   1820}
   1821
   1822void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
   1823{
   1824	if (kvm_xen_timer_enabled(vcpu))
   1825		kvm_xen_stop_timer(vcpu);
   1826
   1827	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
   1828				     &vcpu->arch.xen.runstate_cache);
   1829	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
   1830				     &vcpu->arch.xen.vcpu_info_cache);
   1831	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
   1832				     &vcpu->arch.xen.vcpu_time_info_cache);
   1833	del_timer_sync(&vcpu->arch.xen.poll_timer);
   1834}
   1835
   1836void kvm_xen_init_vm(struct kvm *kvm)
   1837{
   1838	idr_init(&kvm->arch.xen.evtchn_ports);
   1839}
   1840
   1841void kvm_xen_destroy_vm(struct kvm *kvm)
   1842{
   1843	struct evtchnfd *evtchnfd;
   1844	int i;
   1845
   1846	kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache);
   1847
   1848	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
   1849		if (!evtchnfd->deliver.port.port)
   1850			eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
   1851		kfree(evtchnfd);
   1852	}
   1853	idr_destroy(&kvm->arch.xen.evtchn_ports);
   1854
   1855	if (kvm->arch.xen_hvm_config.msr)
   1856		static_branch_slow_dec_deferred(&kvm_xen_enabled);
   1857}